/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/rdma_counter.h>
#include <linux/mlx5/fs.h>
enum {
        MLX5_IB_ACK_REQ_FREQ            = 8,
};

enum {
        MLX5_IB_DEFAULT_SCHED_QUEUE     = 0x83,
        MLX5_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f,
        MLX5_IB_LINK_TYPE_IB            = 0,
        MLX5_IB_LINK_TYPE_ETH           = 1
};

enum {
        MLX5_IB_SQ_STRIDE               = 6,
        MLX5_IB_SQ_UMR_INLINE_THRESHOLD = 64,
};
static const u32 mlx5_ib_opcode[] = {
        [IB_WR_SEND]                            = MLX5_OPCODE_SEND,
        [IB_WR_LSO]                             = MLX5_OPCODE_LSO,
        [IB_WR_SEND_WITH_IMM]                   = MLX5_OPCODE_SEND_IMM,
        [IB_WR_RDMA_WRITE]                      = MLX5_OPCODE_RDMA_WRITE,
        [IB_WR_RDMA_WRITE_WITH_IMM]             = MLX5_OPCODE_RDMA_WRITE_IMM,
        [IB_WR_RDMA_READ]                       = MLX5_OPCODE_RDMA_READ,
        [IB_WR_ATOMIC_CMP_AND_SWP]              = MLX5_OPCODE_ATOMIC_CS,
        [IB_WR_ATOMIC_FETCH_AND_ADD]            = MLX5_OPCODE_ATOMIC_FA,
        [IB_WR_SEND_WITH_INV]                   = MLX5_OPCODE_SEND_INVAL,
        [IB_WR_LOCAL_INV]                       = MLX5_OPCODE_UMR,
        [IB_WR_REG_MR]                          = MLX5_OPCODE_UMR,
        [IB_WR_MASKED_ATOMIC_CMP_AND_SWP]       = MLX5_OPCODE_ATOMIC_MASKED_CS,
        [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]     = MLX5_OPCODE_ATOMIC_MASKED_FA,
        [MLX5_IB_WR_UMR]                        = MLX5_OPCODE_UMR,
};
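/*
 * Illustrative sketch (not part of the driver): the table above is indexed
 * directly by the verbs opcode when a WQE is built, e.g.
 *
 *      u32 op = mlx5_ib_opcode[IB_WR_RDMA_WRITE_WITH_IMM];
 *      // op == MLX5_OPCODE_RDMA_WRITE_IMM
 */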
struct mlx5_wqe_eth_pad {
        u8 rsvd0[16];
};

enum raw_qp_set_mask_map {
        MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID         = 1UL << 0,
        MLX5_RAW_QP_RATE_LIMIT                  = 1UL << 1,
};

struct mlx5_modify_raw_qp_param {
        u16 operation;

        u32 set_mask; /* raw_qp_set_mask_map */

        struct mlx5_rate_limit rl;
};
static void get_cqs(enum ib_qp_type qp_type,
                    struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
                    struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq);

static int is_qp0(enum ib_qp_type qp_type)
{
        return qp_type == IB_QPT_SMI;
}

static int is_sqp(enum ib_qp_type qp_type)
{
        return is_qp0(qp_type) || is_qp1(qp_type);
}
/**
 * mlx5_ib_read_user_wqe_common() - Copy a WQE (or part of it) from user WQ
 *
 * @umem: User space memory where the WQ is
 * @buffer: buffer to copy to
 * @buflen: buffer length
 * @wqe_index: index of WQE to copy from
 * @wq_offset: offset to start of WQ
 * @wq_wqe_cnt: number of WQEs in WQ
 * @wq_wqe_shift: log2 of WQE size
 * @bcnt: number of bytes to copy
 * @bytes_copied: number of bytes actually copied (output)
 *
 * Copies at most bcnt bytes, starting from the beginning of the WQE.
 * It does not guarantee that the entire WQE is copied.
 *
 * Return: zero on success, or an error code.
 */
static int mlx5_ib_read_user_wqe_common(struct ib_umem *umem, void *buffer,
                                        size_t buflen, int wqe_index,
                                        int wq_offset, int wq_wqe_cnt,
                                        int wq_wqe_shift, int bcnt,
                                        size_t *bytes_copied)
{
        size_t offset = wq_offset + ((wqe_index % wq_wqe_cnt) << wq_wqe_shift);
        size_t wq_end = wq_offset + (wq_wqe_cnt << wq_wqe_shift);
        size_t copy_length;
        int ret;

        /* don't copy more than requested, more than buffer length or
         * beyond WQ end
         */
        copy_length = min_t(u32, buflen, wq_end - offset);
        copy_length = min_t(u32, copy_length, bcnt);

        ret = ib_umem_copy_from(buffer, umem, offset, copy_length);

        if (!ret && bytes_copied)
                *bytes_copied = copy_length;

        return ret;
}
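/*
 * Worked example (illustrative, values assumed): with wq_offset = 0,
 * wq_wqe_cnt = 8, wq_wqe_shift = 6 (64-byte WQEs) and wqe_index = 10, the
 * helper above starts reading at offset (10 % 8) << 6 = 128 and copies
 * min(buflen, wq_end - 128, bcnt) bytes, so it never reads past the end of
 * the work queue ring.
 */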
static int mlx5_ib_read_kernel_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index,
                                      void *buffer, size_t buflen, size_t *bc)
{
        struct mlx5_wqe_ctrl_seg *ctrl;
        size_t bytes_copied = 0;
        size_t wqe_length;
        void *p;
        int ds;

        wqe_index = wqe_index & qp->sq.fbc.sz_m1;

        /* read the control segment first */
        p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, wqe_index);
        ctrl = p;
        ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
        wqe_length = ds * MLX5_WQE_DS_UNITS;

        /* read rest of WQE if it spreads over more than one stride */
        while (bytes_copied < wqe_length) {
                size_t copy_length =
                        min_t(size_t, buflen - bytes_copied, MLX5_SEND_WQE_BB);

                if (!copy_length)
                        break;

                memcpy(buffer + bytes_copied, p, copy_length);
                bytes_copied += copy_length;

                wqe_index = (wqe_index + 1) & qp->sq.fbc.sz_m1;
                p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, wqe_index);
        }
        *bc = bytes_copied;
        return 0;
}
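/*
 * Illustrative note: the WQE length above comes from the "ds" field of the
 * control segment, counted in MLX5_WQE_DS_UNITS (16-byte) units; e.g. ds = 4
 * describes a 64-byte WQE that fits in a single MLX5_SEND_WQE_BB stride,
 * while larger values make the loop walk consecutive strides.
 */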
static int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index,
                                    void *buffer, size_t buflen, size_t *bc)
{
        struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
        struct ib_umem *umem = base->ubuffer.umem;
        struct mlx5_ib_wq *wq = &qp->sq;
        struct mlx5_wqe_ctrl_seg *ctrl;
        size_t bytes_copied;
        size_t bytes_copied2;
        size_t wqe_length;
        int ret;
        int ds;

        /* at first read as much as possible */
        ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index,
                                           wq->offset, wq->wqe_cnt,
                                           wq->wqe_shift, buflen,
                                           &bytes_copied);
        if (ret)
                return ret;

        /* we need at least control segment size to proceed */
        if (bytes_copied < sizeof(*ctrl))
                return -EINVAL;

        ctrl = buffer;
        ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
        wqe_length = ds * MLX5_WQE_DS_UNITS;

        /* if we copied enough then we are done */
        if (bytes_copied >= wqe_length) {
                *bc = bytes_copied;
                return 0;
        }

        /* otherwise this is a WQE that wrapped around the end of the queue,
         * so read the remaining bytes starting from wqe_index 0
         */
        ret = mlx5_ib_read_user_wqe_common(umem, buffer + bytes_copied,
                                           buflen - bytes_copied, 0, wq->offset,
                                           wq->wqe_cnt, wq->wqe_shift,
                                           wqe_length - bytes_copied,
                                           &bytes_copied2);
        if (ret)
                return ret;

        *bc = bytes_copied + bytes_copied2;
        return 0;
}
int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
                        size_t buflen, size_t *bc)
{
        struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
        struct ib_umem *umem = base->ubuffer.umem;

        if (buflen < sizeof(struct mlx5_wqe_ctrl_seg))
                return -EINVAL;

        if (!umem)
                return mlx5_ib_read_kernel_wqe_sq(qp, wqe_index, buffer,
                                                  buflen, bc);

        return mlx5_ib_read_user_wqe_sq(qp, wqe_index, buffer, buflen, bc);
}
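/*
 * Usage sketch (hypothetical caller, for illustration only): dump the first
 * send WQE of a QP into a local buffer:
 *
 *      char buf[MLX5_SEND_WQE_BB * 4];
 *      size_t copied;
 *
 *      if (!mlx5_ib_read_wqe_sq(qp, 0, buf, sizeof(buf), &copied))
 *              pr_debug("copied %zu bytes of WQE 0\n", copied);
 */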
static int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index,
                                    void *buffer, size_t buflen, size_t *bc)
{
        struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
        struct ib_umem *umem = base->ubuffer.umem;
        struct mlx5_ib_wq *wq = &qp->rq;
        size_t bytes_copied;
        int ret;

        ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index,
                                           wq->offset, wq->wqe_cnt,
                                           wq->wqe_shift, buflen,
                                           &bytes_copied);
        if (ret)
                return ret;

        *bc = bytes_copied;
        return 0;
}

int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
                        size_t buflen, size_t *bc)
{
        struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
        struct ib_umem *umem = base->ubuffer.umem;
        struct mlx5_ib_wq *wq = &qp->rq;
        size_t wqe_size = 1 << wq->wqe_shift;

        if (buflen < wqe_size)
                return -EINVAL;

        if (!umem)
                return -EOPNOTSUPP;

        return mlx5_ib_read_user_wqe_rq(qp, wqe_index, buffer, buflen, bc);
}
static int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index,
                                     void *buffer, size_t buflen, size_t *bc)
{
        struct ib_umem *umem = srq->umem;
        size_t bytes_copied;
        int ret;

        ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index, 0,
                                           srq->msrq.max, srq->msrq.wqe_shift,
                                           buflen, &bytes_copied);
        if (ret)
                return ret;

        *bc = bytes_copied;
        return 0;
}

int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
                         size_t buflen, size_t *bc)
{
        struct ib_umem *umem = srq->umem;
        size_t wqe_size = 1 << srq->msrq.wqe_shift;

        if (buflen < wqe_size)
                return -EINVAL;

        if (!umem)
                return -EOPNOTSUPP;

        return mlx5_ib_read_user_wqe_srq(srq, wqe_index, buffer, buflen, bc);
}
328 static void mlx5_ib_qp_event(struct mlx5_core_qp
*qp
, int type
)
330 struct ib_qp
*ibqp
= &to_mibqp(qp
)->ibqp
;
331 struct ib_event event
;
333 if (type
== MLX5_EVENT_TYPE_PATH_MIG
) {
334 /* This event is only valid for trans_qps */
335 to_mibqp(qp
)->port
= to_mibqp(qp
)->trans_qp
.alt_port
;
338 if (ibqp
->event_handler
) {
339 event
.device
= ibqp
->device
;
340 event
.element
.qp
= ibqp
;
342 case MLX5_EVENT_TYPE_PATH_MIG
:
343 event
.event
= IB_EVENT_PATH_MIG
;
345 case MLX5_EVENT_TYPE_COMM_EST
:
346 event
.event
= IB_EVENT_COMM_EST
;
348 case MLX5_EVENT_TYPE_SQ_DRAINED
:
349 event
.event
= IB_EVENT_SQ_DRAINED
;
351 case MLX5_EVENT_TYPE_SRQ_LAST_WQE
:
352 event
.event
= IB_EVENT_QP_LAST_WQE_REACHED
;
354 case MLX5_EVENT_TYPE_WQ_CATAS_ERROR
:
355 event
.event
= IB_EVENT_QP_FATAL
;
357 case MLX5_EVENT_TYPE_PATH_MIG_FAILED
:
358 event
.event
= IB_EVENT_PATH_MIG_ERR
;
360 case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR
:
361 event
.event
= IB_EVENT_QP_REQ_ERR
;
363 case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR
:
364 event
.event
= IB_EVENT_QP_ACCESS_ERR
;
367 pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type
, qp
->qpn
);
371 ibqp
->event_handler(&event
, ibqp
->qp_context
);
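/*
 * Illustrative sketch (hypothetical consumer): a ULP event handler
 * registered at QP creation receives the translated event, e.g.:
 *
 *      static void my_qp_event(struct ib_event *ev, void *ctx)
 *      {
 *              if (ev->event == IB_EVENT_QP_FATAL)
 *                      pr_err("QP %u moved to error\n", ev->element.qp->qp_num);
 *      }
 */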
375 static int set_rq_size(struct mlx5_ib_dev
*dev
, struct ib_qp_cap
*cap
,
376 int has_rq
, struct mlx5_ib_qp
*qp
, struct mlx5_ib_create_qp
*ucmd
)
381 /* Sanity check RQ size before proceeding */
382 if (cap
->max_recv_wr
> (1 << MLX5_CAP_GEN(dev
->mdev
, log_max_qp_sz
)))
388 qp
->rq
.wqe_shift
= 0;
389 cap
->max_recv_wr
= 0;
390 cap
->max_recv_sge
= 0;
392 int wq_sig
= !!(qp
->flags_en
& MLX5_QP_FLAG_SIGNATURE
);
395 qp
->rq
.wqe_cnt
= ucmd
->rq_wqe_count
;
396 if (ucmd
->rq_wqe_shift
> BITS_PER_BYTE
* sizeof(ucmd
->rq_wqe_shift
))
398 qp
->rq
.wqe_shift
= ucmd
->rq_wqe_shift
;
399 if ((1 << qp
->rq
.wqe_shift
) /
400 sizeof(struct mlx5_wqe_data_seg
) <
404 (1 << qp
->rq
.wqe_shift
) /
405 sizeof(struct mlx5_wqe_data_seg
) -
407 qp
->rq
.max_post
= qp
->rq
.wqe_cnt
;
410 wq_sig
? sizeof(struct mlx5_wqe_signature_seg
) :
412 wqe_size
+= cap
->max_recv_sge
* sizeof(struct mlx5_wqe_data_seg
);
413 wqe_size
= roundup_pow_of_two(wqe_size
);
414 wq_size
= roundup_pow_of_two(cap
->max_recv_wr
) * wqe_size
;
415 wq_size
= max_t(int, wq_size
, MLX5_SEND_WQE_BB
);
416 qp
->rq
.wqe_cnt
= wq_size
/ wqe_size
;
417 if (wqe_size
> MLX5_CAP_GEN(dev
->mdev
, max_wqe_sz_rq
)) {
418 mlx5_ib_dbg(dev
, "wqe_size %d, max %d\n",
420 MLX5_CAP_GEN(dev
->mdev
,
424 qp
->rq
.wqe_shift
= ilog2(wqe_size
);
426 (1 << qp
->rq
.wqe_shift
) /
427 sizeof(struct mlx5_wqe_data_seg
) -
429 qp
->rq
.max_post
= qp
->rq
.wqe_cnt
;
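/*
 * Worked example (illustrative, values assumed): a kernel QP asking for
 * max_recv_wr = 100 and max_recv_sge = 3 without a WQ signature needs
 * 3 * sizeof(struct mlx5_wqe_data_seg) = 48 bytes per receive WQE, rounded
 * up to 64; wq_size = roundup_pow_of_two(100) * 64 = 8192 bytes, giving
 * wqe_cnt = 128 and wqe_shift = 6.
 */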
436 static int sq_overhead(struct ib_qp_init_attr
*attr
)
440 switch (attr
->qp_type
) {
442 size
+= sizeof(struct mlx5_wqe_xrc_seg
);
445 size
+= sizeof(struct mlx5_wqe_ctrl_seg
) +
446 max(sizeof(struct mlx5_wqe_atomic_seg
) +
447 sizeof(struct mlx5_wqe_raddr_seg
),
448 sizeof(struct mlx5_wqe_umr_ctrl_seg
) +
449 sizeof(struct mlx5_mkey_seg
) +
450 MLX5_IB_SQ_UMR_INLINE_THRESHOLD
/
451 MLX5_IB_UMR_OCTOWORD
);
458 size
+= sizeof(struct mlx5_wqe_ctrl_seg
) +
459 max(sizeof(struct mlx5_wqe_raddr_seg
),
460 sizeof(struct mlx5_wqe_umr_ctrl_seg
) +
461 sizeof(struct mlx5_mkey_seg
));
465 if (attr
->create_flags
& IB_QP_CREATE_IPOIB_UD_LSO
)
466 size
+= sizeof(struct mlx5_wqe_eth_pad
) +
467 sizeof(struct mlx5_wqe_eth_seg
);
470 case MLX5_IB_QPT_HW_GSI
:
471 size
+= sizeof(struct mlx5_wqe_ctrl_seg
) +
472 sizeof(struct mlx5_wqe_datagram_seg
);
475 case MLX5_IB_QPT_REG_UMR
:
476 size
+= sizeof(struct mlx5_wqe_ctrl_seg
) +
477 sizeof(struct mlx5_wqe_umr_ctrl_seg
) +
478 sizeof(struct mlx5_mkey_seg
);
488 static int calc_send_wqe(struct ib_qp_init_attr
*attr
)
493 size
= sq_overhead(attr
);
497 if (attr
->cap
.max_inline_data
) {
498 inl_size
= size
+ sizeof(struct mlx5_wqe_inline_seg
) +
499 attr
->cap
.max_inline_data
;
502 size
+= attr
->cap
.max_send_sge
* sizeof(struct mlx5_wqe_data_seg
);
503 if (attr
->create_flags
& IB_QP_CREATE_INTEGRITY_EN
&&
504 ALIGN(max_t(int, inl_size
, size
), MLX5_SEND_WQE_BB
) < MLX5_SIG_WQE_SIZE
)
505 return MLX5_SIG_WQE_SIZE
;
507 return ALIGN(max_t(int, inl_size
, size
), MLX5_SEND_WQE_BB
);
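/*
 * Worked example (illustrative, values assumed): an RC QP with
 * max_send_sge = 4 and no inline data needs sq_overhead() plus
 * 4 * sizeof(struct mlx5_wqe_data_seg) = 64 bytes of gather entries, and the
 * larger of that and the inline size is aligned up to a multiple of
 * MLX5_SEND_WQE_BB.
 */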
510 static int get_send_sge(struct ib_qp_init_attr
*attr
, int wqe_size
)
514 if (attr
->qp_type
== IB_QPT_RC
)
515 max_sge
= (min_t(int, wqe_size
, 512) -
516 sizeof(struct mlx5_wqe_ctrl_seg
) -
517 sizeof(struct mlx5_wqe_raddr_seg
)) /
518 sizeof(struct mlx5_wqe_data_seg
);
519 else if (attr
->qp_type
== IB_QPT_XRC_INI
)
520 max_sge
= (min_t(int, wqe_size
, 512) -
521 sizeof(struct mlx5_wqe_ctrl_seg
) -
522 sizeof(struct mlx5_wqe_xrc_seg
) -
523 sizeof(struct mlx5_wqe_raddr_seg
)) /
524 sizeof(struct mlx5_wqe_data_seg
);
526 max_sge
= (wqe_size
- sq_overhead(attr
)) /
527 sizeof(struct mlx5_wqe_data_seg
);
529 return min_t(int, max_sge
, wqe_size
- sq_overhead(attr
) /
530 sizeof(struct mlx5_wqe_data_seg
));
533 static int calc_sq_size(struct mlx5_ib_dev
*dev
, struct ib_qp_init_attr
*attr
,
534 struct mlx5_ib_qp
*qp
)
539 if (!attr
->cap
.max_send_wr
)
542 wqe_size
= calc_send_wqe(attr
);
543 mlx5_ib_dbg(dev
, "wqe_size %d\n", wqe_size
);
547 if (wqe_size
> MLX5_CAP_GEN(dev
->mdev
, max_wqe_sz_sq
)) {
548 mlx5_ib_dbg(dev
, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
549 wqe_size
, MLX5_CAP_GEN(dev
->mdev
, max_wqe_sz_sq
));
553 qp
->max_inline_data
= wqe_size
- sq_overhead(attr
) -
554 sizeof(struct mlx5_wqe_inline_seg
);
555 attr
->cap
.max_inline_data
= qp
->max_inline_data
;
557 wq_size
= roundup_pow_of_two(attr
->cap
.max_send_wr
* wqe_size
);
558 qp
->sq
.wqe_cnt
= wq_size
/ MLX5_SEND_WQE_BB
;
559 if (qp
->sq
.wqe_cnt
> (1 << MLX5_CAP_GEN(dev
->mdev
, log_max_qp_sz
))) {
560 mlx5_ib_dbg(dev
, "send queue size (%d * %d / %d -> %d) exceeds limits(%d)\n",
561 attr
->cap
.max_send_wr
, wqe_size
, MLX5_SEND_WQE_BB
,
563 1 << MLX5_CAP_GEN(dev
->mdev
, log_max_qp_sz
));
566 qp
->sq
.wqe_shift
= ilog2(MLX5_SEND_WQE_BB
);
567 qp
->sq
.max_gs
= get_send_sge(attr
, wqe_size
);
568 if (qp
->sq
.max_gs
< attr
->cap
.max_send_sge
)
571 attr
->cap
.max_send_sge
= qp
->sq
.max_gs
;
572 qp
->sq
.max_post
= wq_size
/ wqe_size
;
573 attr
->cap
.max_send_wr
= qp
->sq
.max_post
;
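/*
 * Worked example (illustrative, values assumed): with max_send_wr = 50 and a
 * 192-byte send WQE, wq_size = roundup_pow_of_two(50 * 192) = 16384 bytes,
 * so the SQ holds 16384 / MLX5_SEND_WQE_BB = 256 basic blocks while
 * max_post = 16384 / 192 = 85 full-sized WQEs.
 */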
578 static int set_user_buf_size(struct mlx5_ib_dev
*dev
,
579 struct mlx5_ib_qp
*qp
,
580 struct mlx5_ib_create_qp
*ucmd
,
581 struct mlx5_ib_qp_base
*base
,
582 struct ib_qp_init_attr
*attr
)
584 int desc_sz
= 1 << qp
->sq
.wqe_shift
;
586 if (desc_sz
> MLX5_CAP_GEN(dev
->mdev
, max_wqe_sz_sq
)) {
587 mlx5_ib_warn(dev
, "desc_sz %d, max_sq_desc_sz %d\n",
588 desc_sz
, MLX5_CAP_GEN(dev
->mdev
, max_wqe_sz_sq
));
592 if (ucmd
->sq_wqe_count
&& !is_power_of_2(ucmd
->sq_wqe_count
)) {
593 mlx5_ib_warn(dev
, "sq_wqe_count %d is not a power of two\n",
598 qp
->sq
.wqe_cnt
= ucmd
->sq_wqe_count
;
600 if (qp
->sq
.wqe_cnt
> (1 << MLX5_CAP_GEN(dev
->mdev
, log_max_qp_sz
))) {
601 mlx5_ib_warn(dev
, "wqe_cnt %d, max_wqes %d\n",
603 1 << MLX5_CAP_GEN(dev
->mdev
, log_max_qp_sz
));
607 if (attr
->qp_type
== IB_QPT_RAW_PACKET
||
608 qp
->flags
& IB_QP_CREATE_SOURCE_QPN
) {
609 base
->ubuffer
.buf_size
= qp
->rq
.wqe_cnt
<< qp
->rq
.wqe_shift
;
610 qp
->raw_packet_qp
.sq
.ubuffer
.buf_size
= qp
->sq
.wqe_cnt
<< 6;
612 base
->ubuffer
.buf_size
= (qp
->rq
.wqe_cnt
<< qp
->rq
.wqe_shift
) +
613 (qp
->sq
.wqe_cnt
<< 6);
619 static int qp_has_rq(struct ib_qp_init_attr
*attr
)
621 if (attr
->qp_type
== IB_QPT_XRC_INI
||
622 attr
->qp_type
== IB_QPT_XRC_TGT
|| attr
->srq
||
623 attr
->qp_type
== MLX5_IB_QPT_REG_UMR
||
624 !attr
->cap
.max_recv_wr
)
enum {
        /* this is the first blue flame register in the array of bfregs
         * assigned to a process. Since we do not use it for blue flame but
         * rather regular 64 bit doorbells, we do not need a lock for
         * maintaining "odd/even" order
         */
        NUM_NON_BLUE_FLAME_BFREGS = 1,
};
639 static int max_bfregs(struct mlx5_ib_dev
*dev
, struct mlx5_bfreg_info
*bfregi
)
641 return get_num_static_uars(dev
, bfregi
) * MLX5_NON_FP_BFREGS_PER_UAR
;
644 static int num_med_bfreg(struct mlx5_ib_dev
*dev
,
645 struct mlx5_bfreg_info
*bfregi
)
649 n
= max_bfregs(dev
, bfregi
) - bfregi
->num_low_latency_bfregs
-
650 NUM_NON_BLUE_FLAME_BFREGS
;
652 return n
>= 0 ? n
: 0;
655 static int first_med_bfreg(struct mlx5_ib_dev
*dev
,
656 struct mlx5_bfreg_info
*bfregi
)
658 return num_med_bfreg(dev
, bfregi
) ? 1 : -ENOMEM
;
661 static int first_hi_bfreg(struct mlx5_ib_dev
*dev
,
662 struct mlx5_bfreg_info
*bfregi
)
666 med
= num_med_bfreg(dev
, bfregi
);
670 static int alloc_high_class_bfreg(struct mlx5_ib_dev
*dev
,
671 struct mlx5_bfreg_info
*bfregi
)
675 for (i
= first_hi_bfreg(dev
, bfregi
); i
< max_bfregs(dev
, bfregi
); i
++) {
676 if (!bfregi
->count
[i
]) {
685 static int alloc_med_class_bfreg(struct mlx5_ib_dev
*dev
,
686 struct mlx5_bfreg_info
*bfregi
)
688 int minidx
= first_med_bfreg(dev
, bfregi
);
694 for (i
= minidx
; i
< first_hi_bfreg(dev
, bfregi
); i
++) {
695 if (bfregi
->count
[i
] < bfregi
->count
[minidx
])
697 if (!bfregi
->count
[minidx
])
701 bfregi
->count
[minidx
]++;
705 static int alloc_bfreg(struct mlx5_ib_dev
*dev
,
706 struct mlx5_bfreg_info
*bfregi
)
708 int bfregn
= -ENOMEM
;
710 if (bfregi
->lib_uar_dyn
)
713 mutex_lock(&bfregi
->lock
);
714 if (bfregi
->ver
>= 2) {
715 bfregn
= alloc_high_class_bfreg(dev
, bfregi
);
717 bfregn
= alloc_med_class_bfreg(dev
, bfregi
);
721 BUILD_BUG_ON(NUM_NON_BLUE_FLAME_BFREGS
!= 1);
723 bfregi
->count
[bfregn
]++;
725 mutex_unlock(&bfregi
->lock
);
730 void mlx5_ib_free_bfreg(struct mlx5_ib_dev
*dev
, struct mlx5_bfreg_info
*bfregi
, int bfregn
)
732 mutex_lock(&bfregi
->lock
);
733 bfregi
->count
[bfregn
]--;
734 mutex_unlock(&bfregi
->lock
);
737 static enum mlx5_qp_state
to_mlx5_state(enum ib_qp_state state
)
740 case IB_QPS_RESET
: return MLX5_QP_STATE_RST
;
741 case IB_QPS_INIT
: return MLX5_QP_STATE_INIT
;
742 case IB_QPS_RTR
: return MLX5_QP_STATE_RTR
;
743 case IB_QPS_RTS
: return MLX5_QP_STATE_RTS
;
744 case IB_QPS_SQD
: return MLX5_QP_STATE_SQD
;
745 case IB_QPS_SQE
: return MLX5_QP_STATE_SQER
;
746 case IB_QPS_ERR
: return MLX5_QP_STATE_ERR
;
751 static int to_mlx5_st(enum ib_qp_type type
)
754 case IB_QPT_RC
: return MLX5_QP_ST_RC
;
755 case IB_QPT_UC
: return MLX5_QP_ST_UC
;
756 case IB_QPT_UD
: return MLX5_QP_ST_UD
;
757 case MLX5_IB_QPT_REG_UMR
: return MLX5_QP_ST_REG_UMR
;
759 case IB_QPT_XRC_TGT
: return MLX5_QP_ST_XRC
;
760 case IB_QPT_SMI
: return MLX5_QP_ST_QP0
;
761 case MLX5_IB_QPT_HW_GSI
: return MLX5_QP_ST_QP1
;
762 case MLX5_IB_QPT_DCI
: return MLX5_QP_ST_DCI
;
763 case IB_QPT_RAW_PACKET
: return MLX5_QP_ST_RAW_ETHERTYPE
;
764 default: return -EINVAL
;
768 static void mlx5_ib_lock_cqs(struct mlx5_ib_cq
*send_cq
,
769 struct mlx5_ib_cq
*recv_cq
);
770 static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq
*send_cq
,
771 struct mlx5_ib_cq
*recv_cq
);
773 int bfregn_to_uar_index(struct mlx5_ib_dev
*dev
,
774 struct mlx5_bfreg_info
*bfregi
, u32 bfregn
,
777 unsigned int bfregs_per_sys_page
;
778 u32 index_of_sys_page
;
781 if (bfregi
->lib_uar_dyn
)
784 bfregs_per_sys_page
= get_uars_per_sys_page(dev
, bfregi
->lib_uar_4k
) *
785 MLX5_NON_FP_BFREGS_PER_UAR
;
786 index_of_sys_page
= bfregn
/ bfregs_per_sys_page
;
789 index_of_sys_page
+= bfregi
->num_static_sys_pages
;
791 if (index_of_sys_page
>= bfregi
->num_sys_pages
)
794 if (bfregn
> bfregi
->num_dyn_bfregs
||
795 bfregi
->sys_pages
[index_of_sys_page
] == MLX5_IB_INVALID_UAR_INDEX
) {
796 mlx5_ib_dbg(dev
, "Invalid dynamic uar index\n");
801 offset
= bfregn
% bfregs_per_sys_page
/ MLX5_NON_FP_BFREGS_PER_UAR
;
802 return bfregi
->sys_pages
[index_of_sys_page
] + offset
;
805 static int mlx5_ib_umem_get(struct mlx5_ib_dev
*dev
, struct ib_udata
*udata
,
806 unsigned long addr
, size_t size
,
807 struct ib_umem
**umem
, int *npages
, int *page_shift
,
808 int *ncont
, u32
*offset
)
812 *umem
= ib_umem_get(&dev
->ib_dev
, addr
, size
, 0);
814 mlx5_ib_dbg(dev
, "umem_get failed\n");
815 return PTR_ERR(*umem
);
818 mlx5_ib_cont_pages(*umem
, addr
, 0, npages
, page_shift
, ncont
, NULL
);
820 err
= mlx5_ib_get_buf_offset(addr
, *page_shift
, offset
);
822 mlx5_ib_warn(dev
, "bad offset\n");
826 mlx5_ib_dbg(dev
, "addr 0x%lx, size %zu, npages %d, page_shift %d, ncont %d, offset %d\n",
827 addr
, size
, *npages
, *page_shift
, *ncont
, *offset
);
832 ib_umem_release(*umem
);
838 static void destroy_user_rq(struct mlx5_ib_dev
*dev
, struct ib_pd
*pd
,
839 struct mlx5_ib_rwq
*rwq
, struct ib_udata
*udata
)
841 struct mlx5_ib_ucontext
*context
=
842 rdma_udata_to_drv_context(
844 struct mlx5_ib_ucontext
,
847 if (rwq
->create_flags
& MLX5_IB_WQ_FLAGS_DELAY_DROP
)
848 atomic_dec(&dev
->delay_drop
.rqs_cnt
);
850 mlx5_ib_db_unmap_user(context
, &rwq
->db
);
851 ib_umem_release(rwq
->umem
);
854 static int create_user_rq(struct mlx5_ib_dev
*dev
, struct ib_pd
*pd
,
855 struct ib_udata
*udata
, struct mlx5_ib_rwq
*rwq
,
856 struct mlx5_ib_create_wq
*ucmd
)
858 struct mlx5_ib_ucontext
*ucontext
= rdma_udata_to_drv_context(
859 udata
, struct mlx5_ib_ucontext
, ibucontext
);
869 rwq
->umem
= ib_umem_get(&dev
->ib_dev
, ucmd
->buf_addr
, rwq
->buf_size
, 0);
870 if (IS_ERR(rwq
->umem
)) {
871 mlx5_ib_dbg(dev
, "umem_get failed\n");
872 err
= PTR_ERR(rwq
->umem
);
876 mlx5_ib_cont_pages(rwq
->umem
, ucmd
->buf_addr
, 0, &npages
, &page_shift
,
878 err
= mlx5_ib_get_buf_offset(ucmd
->buf_addr
, page_shift
,
879 &rwq
->rq_page_offset
);
881 mlx5_ib_warn(dev
, "bad offset\n");
885 rwq
->rq_num_pas
= ncont
;
886 rwq
->page_shift
= page_shift
;
887 rwq
->log_page_size
= page_shift
- MLX5_ADAPTER_PAGE_SHIFT
;
888 rwq
->wq_sig
= !!(ucmd
->flags
& MLX5_WQ_FLAG_SIGNATURE
);
890 mlx5_ib_dbg(dev
, "addr 0x%llx, size %zd, npages %d, page_shift %d, ncont %d, offset %d\n",
891 (unsigned long long)ucmd
->buf_addr
, rwq
->buf_size
,
892 npages
, page_shift
, ncont
, offset
);
894 err
= mlx5_ib_db_map_user(ucontext
, udata
, ucmd
->db_addr
, &rwq
->db
);
896 mlx5_ib_dbg(dev
, "map failed\n");
903 ib_umem_release(rwq
->umem
);
907 static int adjust_bfregn(struct mlx5_ib_dev
*dev
,
908 struct mlx5_bfreg_info
*bfregi
, int bfregn
)
910 return bfregn
/ MLX5_NON_FP_BFREGS_PER_UAR
* MLX5_BFREGS_PER_UAR
+
911 bfregn
% MLX5_NON_FP_BFREGS_PER_UAR
;
914 static int _create_user_qp(struct mlx5_ib_dev
*dev
, struct ib_pd
*pd
,
915 struct mlx5_ib_qp
*qp
, struct ib_udata
*udata
,
916 struct ib_qp_init_attr
*attr
, u32
**in
,
917 struct mlx5_ib_create_qp_resp
*resp
, int *inlen
,
918 struct mlx5_ib_qp_base
*base
,
919 struct mlx5_ib_create_qp
*ucmd
)
921 struct mlx5_ib_ucontext
*context
;
922 struct mlx5_ib_ubuffer
*ubuffer
= &base
->ubuffer
;
935 context
= rdma_udata_to_drv_context(udata
, struct mlx5_ib_ucontext
,
937 uar_flags
= qp
->flags_en
&
938 (MLX5_QP_FLAG_UAR_PAGE_INDEX
| MLX5_QP_FLAG_BFREG_INDEX
);
940 case MLX5_QP_FLAG_UAR_PAGE_INDEX
:
941 uar_index
= ucmd
->bfreg_index
;
942 bfregn
= MLX5_IB_INVALID_BFREG
;
944 case MLX5_QP_FLAG_BFREG_INDEX
:
945 uar_index
= bfregn_to_uar_index(dev
, &context
->bfregi
,
946 ucmd
->bfreg_index
, true);
949 bfregn
= MLX5_IB_INVALID_BFREG
;
952 if (qp
->flags
& IB_QP_CREATE_CROSS_CHANNEL
)
954 bfregn
= alloc_bfreg(dev
, &context
->bfregi
);
962 mlx5_ib_dbg(dev
, "bfregn 0x%x, uar_index 0x%x\n", bfregn
, uar_index
);
963 if (bfregn
!= MLX5_IB_INVALID_BFREG
)
964 uar_index
= bfregn_to_uar_index(dev
, &context
->bfregi
, bfregn
,
968 qp
->sq
.wqe_shift
= ilog2(MLX5_SEND_WQE_BB
);
969 qp
->sq
.offset
= qp
->rq
.wqe_cnt
<< qp
->rq
.wqe_shift
;
971 err
= set_user_buf_size(dev
, qp
, ucmd
, base
, attr
);
975 if (ucmd
->buf_addr
&& ubuffer
->buf_size
) {
976 ubuffer
->buf_addr
= ucmd
->buf_addr
;
977 err
= mlx5_ib_umem_get(dev
, udata
, ubuffer
->buf_addr
,
978 ubuffer
->buf_size
, &ubuffer
->umem
,
979 &npages
, &page_shift
, &ncont
, &offset
);
983 ubuffer
->umem
= NULL
;
986 *inlen
= MLX5_ST_SZ_BYTES(create_qp_in
) +
987 MLX5_FLD_SZ_BYTES(create_qp_in
, pas
[0]) * ncont
;
988 *in
= kvzalloc(*inlen
, GFP_KERNEL
);
994 uid
= (attr
->qp_type
!= IB_QPT_XRC_INI
) ? to_mpd(pd
)->uid
: 0;
995 MLX5_SET(create_qp_in
, *in
, uid
, uid
);
996 pas
= (__be64
*)MLX5_ADDR_OF(create_qp_in
, *in
, pas
);
998 mlx5_ib_populate_pas(dev
, ubuffer
->umem
, page_shift
, pas
, 0);
1000 qpc
= MLX5_ADDR_OF(create_qp_in
, *in
, qpc
);
1002 MLX5_SET(qpc
, qpc
, log_page_size
, page_shift
- MLX5_ADAPTER_PAGE_SHIFT
);
1003 MLX5_SET(qpc
, qpc
, page_offset
, offset
);
1005 MLX5_SET(qpc
, qpc
, uar_page
, uar_index
);
1006 if (bfregn
!= MLX5_IB_INVALID_BFREG
)
1007 resp
->bfreg_index
= adjust_bfregn(dev
, &context
->bfregi
, bfregn
);
1009 resp
->bfreg_index
= MLX5_IB_INVALID_BFREG
;
1010 qp
->bfregn
= bfregn
;
1012 err
= mlx5_ib_db_map_user(context
, udata
, ucmd
->db_addr
, &qp
->db
);
1014 mlx5_ib_dbg(dev
, "map failed\n");
1018 err
= ib_copy_to_udata(udata
, resp
, min(udata
->outlen
, sizeof(*resp
)));
1020 mlx5_ib_dbg(dev
, "copy failed\n");
1027 mlx5_ib_db_unmap_user(context
, &qp
->db
);
1033 ib_umem_release(ubuffer
->umem
);
1036 if (bfregn
!= MLX5_IB_INVALID_BFREG
)
1037 mlx5_ib_free_bfreg(dev
, &context
->bfregi
, bfregn
);
1041 static void destroy_qp(struct mlx5_ib_dev
*dev
, struct mlx5_ib_qp
*qp
,
1042 struct mlx5_ib_qp_base
*base
, struct ib_udata
*udata
)
1044 struct mlx5_ib_ucontext
*context
= rdma_udata_to_drv_context(
1045 udata
, struct mlx5_ib_ucontext
, ibucontext
);
1049 mlx5_ib_db_unmap_user(context
, &qp
->db
);
1050 ib_umem_release(base
->ubuffer
.umem
);
1053 * Free only the BFREGs which are handled by the kernel.
1054 * BFREGs of UARs allocated dynamically are handled by user.
1056 if (qp
->bfregn
!= MLX5_IB_INVALID_BFREG
)
1057 mlx5_ib_free_bfreg(dev
, &context
->bfregi
, qp
->bfregn
);
1062 kvfree(qp
->sq
.wqe_head
);
1063 kvfree(qp
->sq
.w_list
);
1064 kvfree(qp
->sq
.wrid
);
1065 kvfree(qp
->sq
.wr_data
);
1066 kvfree(qp
->rq
.wrid
);
1068 mlx5_db_free(dev
->mdev
, &qp
->db
);
1070 mlx5_frag_buf_free(dev
->mdev
, &qp
->buf
);
/* get_sq_edge - Get the next nearby edge.
 *
 * An 'edge' is defined as the first address following the end of the
 * fragment or the SQ. Accordingly, during WQE construction, which
 * repetitively advances the pointer used to write the next data, the caller
 * simply needs to check whether it has reached an edge.
 *
 * @sq - SQ buffer.
 * @idx - Stride index in the SQ buffer.
 */
1086 static void *get_sq_edge(struct mlx5_ib_wq
*sq
, u32 idx
)
1090 fragment_end
= mlx5_frag_buf_get_wqe
1092 mlx5_frag_buf_get_idx_last_contig_stride(&sq
->fbc
, idx
));
1094 return fragment_end
+ MLX5_SEND_WQE_BB
;
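/*
 * Illustrative note: if stride idx lives in a fragment whose last contiguous
 * stride is N, get_sq_edge() returns the address just past stride N; a WQE
 * builder compares its running write pointer against this edge and wraps to
 * the next fragment once the edge is reached.
 */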
1097 static int _create_kernel_qp(struct mlx5_ib_dev
*dev
,
1098 struct ib_qp_init_attr
*init_attr
,
1099 struct mlx5_ib_qp
*qp
, u32
**in
, int *inlen
,
1100 struct mlx5_ib_qp_base
*base
)
1106 if (init_attr
->qp_type
== MLX5_IB_QPT_REG_UMR
)
1107 qp
->bf
.bfreg
= &dev
->fp_bfreg
;
1108 else if (qp
->flags
& MLX5_IB_QP_CREATE_WC_TEST
)
1109 qp
->bf
.bfreg
= &dev
->wc_bfreg
;
1111 qp
->bf
.bfreg
= &dev
->bfreg
;
1113 /* We need to divide by two since each register is comprised of
1114 * two buffers of identical size, namely odd and even
1116 qp
->bf
.buf_size
= (1 << MLX5_CAP_GEN(dev
->mdev
, log_bf_reg_size
)) / 2;
1117 uar_index
= qp
->bf
.bfreg
->index
;
1119 err
= calc_sq_size(dev
, init_attr
, qp
);
1121 mlx5_ib_dbg(dev
, "err %d\n", err
);
1126 qp
->sq
.offset
= qp
->rq
.wqe_cnt
<< qp
->rq
.wqe_shift
;
1127 base
->ubuffer
.buf_size
= err
+ (qp
->rq
.wqe_cnt
<< qp
->rq
.wqe_shift
);
1129 err
= mlx5_frag_buf_alloc_node(dev
->mdev
, base
->ubuffer
.buf_size
,
1130 &qp
->buf
, dev
->mdev
->priv
.numa_node
);
1132 mlx5_ib_dbg(dev
, "err %d\n", err
);
1137 mlx5_init_fbc(qp
->buf
.frags
, qp
->rq
.wqe_shift
,
1138 ilog2(qp
->rq
.wqe_cnt
), &qp
->rq
.fbc
);
1140 if (qp
->sq
.wqe_cnt
) {
1141 int sq_strides_offset
= (qp
->sq
.offset
& (PAGE_SIZE
- 1)) /
1143 mlx5_init_fbc_offset(qp
->buf
.frags
+
1144 (qp
->sq
.offset
/ PAGE_SIZE
),
1145 ilog2(MLX5_SEND_WQE_BB
),
1146 ilog2(qp
->sq
.wqe_cnt
),
1147 sq_strides_offset
, &qp
->sq
.fbc
);
1149 qp
->sq
.cur_edge
= get_sq_edge(&qp
->sq
, 0);
1152 *inlen
= MLX5_ST_SZ_BYTES(create_qp_in
) +
1153 MLX5_FLD_SZ_BYTES(create_qp_in
, pas
[0]) * qp
->buf
.npages
;
1154 *in
= kvzalloc(*inlen
, GFP_KERNEL
);
1160 qpc
= MLX5_ADDR_OF(create_qp_in
, *in
, qpc
);
1161 MLX5_SET(qpc
, qpc
, uar_page
, uar_index
);
1162 MLX5_SET(qpc
, qpc
, log_page_size
, qp
->buf
.page_shift
- MLX5_ADAPTER_PAGE_SHIFT
);
1164 /* Set "fast registration enabled" for all kernel QPs */
1165 MLX5_SET(qpc
, qpc
, fre
, 1);
1166 MLX5_SET(qpc
, qpc
, rlky
, 1);
1168 if (qp
->flags
& MLX5_IB_QP_CREATE_SQPN_QP1
)
1169 MLX5_SET(qpc
, qpc
, deth_sqpn
, 1);
1171 mlx5_fill_page_frag_array(&qp
->buf
,
1172 (__be64
*)MLX5_ADDR_OF(create_qp_in
,
1175 err
= mlx5_db_alloc(dev
->mdev
, &qp
->db
);
1177 mlx5_ib_dbg(dev
, "err %d\n", err
);
1181 qp
->sq
.wrid
= kvmalloc_array(qp
->sq
.wqe_cnt
,
1182 sizeof(*qp
->sq
.wrid
), GFP_KERNEL
);
1183 qp
->sq
.wr_data
= kvmalloc_array(qp
->sq
.wqe_cnt
,
1184 sizeof(*qp
->sq
.wr_data
), GFP_KERNEL
);
1185 qp
->rq
.wrid
= kvmalloc_array(qp
->rq
.wqe_cnt
,
1186 sizeof(*qp
->rq
.wrid
), GFP_KERNEL
);
1187 qp
->sq
.w_list
= kvmalloc_array(qp
->sq
.wqe_cnt
,
1188 sizeof(*qp
->sq
.w_list
), GFP_KERNEL
);
1189 qp
->sq
.wqe_head
= kvmalloc_array(qp
->sq
.wqe_cnt
,
1190 sizeof(*qp
->sq
.wqe_head
), GFP_KERNEL
);
1192 if (!qp
->sq
.wrid
|| !qp
->sq
.wr_data
|| !qp
->rq
.wrid
||
1193 !qp
->sq
.w_list
|| !qp
->sq
.wqe_head
) {
1201 kvfree(qp
->sq
.wqe_head
);
1202 kvfree(qp
->sq
.w_list
);
1203 kvfree(qp
->sq
.wrid
);
1204 kvfree(qp
->sq
.wr_data
);
1205 kvfree(qp
->rq
.wrid
);
1206 mlx5_db_free(dev
->mdev
, &qp
->db
);
1212 mlx5_frag_buf_free(dev
->mdev
, &qp
->buf
);
1216 static u32
get_rx_type(struct mlx5_ib_qp
*qp
, struct ib_qp_init_attr
*attr
)
1218 if (attr
->srq
|| (qp
->type
== IB_QPT_XRC_TGT
) ||
1219 (qp
->type
== MLX5_IB_QPT_DCI
) || (qp
->type
== IB_QPT_XRC_INI
))
1221 else if (!qp
->has_rq
)
1222 return MLX5_ZERO_LEN_RQ
;
1224 return MLX5_NON_ZERO_RQ
;
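/*
 * Illustrative summary of get_rx_type(): a QP attached to an SRQ, or an
 * XRC_TGT/XRC_INI/DCI QP, uses MLX5_SRQ_RQ; a QP created without a receive
 * queue uses MLX5_ZERO_LEN_RQ; everything else gets MLX5_NON_ZERO_RQ.
 */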
1227 static int create_raw_packet_qp_tis(struct mlx5_ib_dev
*dev
,
1228 struct mlx5_ib_qp
*qp
,
1229 struct mlx5_ib_sq
*sq
, u32 tdn
,
1232 u32 in
[MLX5_ST_SZ_DW(create_tis_in
)] = {};
1233 void *tisc
= MLX5_ADDR_OF(create_tis_in
, in
, ctx
);
1235 MLX5_SET(create_tis_in
, in
, uid
, to_mpd(pd
)->uid
);
1236 MLX5_SET(tisc
, tisc
, transport_domain
, tdn
);
1237 if (qp
->flags
& IB_QP_CREATE_SOURCE_QPN
)
1238 MLX5_SET(tisc
, tisc
, underlay_qpn
, qp
->underlay_qpn
);
1240 return mlx5_core_create_tis(dev
->mdev
, in
, &sq
->tisn
);
1243 static void destroy_raw_packet_qp_tis(struct mlx5_ib_dev
*dev
,
1244 struct mlx5_ib_sq
*sq
, struct ib_pd
*pd
)
1246 mlx5_cmd_destroy_tis(dev
->mdev
, sq
->tisn
, to_mpd(pd
)->uid
);
1249 static void destroy_flow_rule_vport_sq(struct mlx5_ib_sq
*sq
)
1252 mlx5_del_flow_rules(sq
->flow_rule
);
1253 sq
->flow_rule
= NULL
;
1256 static int create_raw_packet_qp_sq(struct mlx5_ib_dev
*dev
,
1257 struct ib_udata
*udata
,
1258 struct mlx5_ib_sq
*sq
, void *qpin
,
1261 struct mlx5_ib_ubuffer
*ubuffer
= &sq
->ubuffer
;
1265 void *qpc
= MLX5_ADDR_OF(create_qp_in
, qpin
, qpc
);
1274 err
= mlx5_ib_umem_get(dev
, udata
, ubuffer
->buf_addr
, ubuffer
->buf_size
,
1275 &sq
->ubuffer
.umem
, &npages
, &page_shift
, &ncont
,
1280 inlen
= MLX5_ST_SZ_BYTES(create_sq_in
) + sizeof(u64
) * ncont
;
1281 in
= kvzalloc(inlen
, GFP_KERNEL
);
1287 MLX5_SET(create_sq_in
, in
, uid
, to_mpd(pd
)->uid
);
1288 sqc
= MLX5_ADDR_OF(create_sq_in
, in
, ctx
);
1289 MLX5_SET(sqc
, sqc
, flush_in_error_en
, 1);
1290 if (MLX5_CAP_ETH(dev
->mdev
, multi_pkt_send_wqe
))
1291 MLX5_SET(sqc
, sqc
, allow_multi_pkt_send_wqe
, 1);
1292 MLX5_SET(sqc
, sqc
, state
, MLX5_SQC_STATE_RST
);
1293 MLX5_SET(sqc
, sqc
, user_index
, MLX5_GET(qpc
, qpc
, user_index
));
1294 MLX5_SET(sqc
, sqc
, cqn
, MLX5_GET(qpc
, qpc
, cqn_snd
));
1295 MLX5_SET(sqc
, sqc
, tis_lst_sz
, 1);
1296 MLX5_SET(sqc
, sqc
, tis_num_0
, sq
->tisn
);
1297 if (MLX5_CAP_GEN(dev
->mdev
, eth_net_offloads
) &&
1298 MLX5_CAP_ETH(dev
->mdev
, swp
))
1299 MLX5_SET(sqc
, sqc
, allow_swp
, 1);
1301 wq
= MLX5_ADDR_OF(sqc
, sqc
, wq
);
1302 MLX5_SET(wq
, wq
, wq_type
, MLX5_WQ_TYPE_CYCLIC
);
1303 MLX5_SET(wq
, wq
, pd
, MLX5_GET(qpc
, qpc
, pd
));
1304 MLX5_SET(wq
, wq
, uar_page
, MLX5_GET(qpc
, qpc
, uar_page
));
1305 MLX5_SET64(wq
, wq
, dbr_addr
, MLX5_GET64(qpc
, qpc
, dbr_addr
));
1306 MLX5_SET(wq
, wq
, log_wq_stride
, ilog2(MLX5_SEND_WQE_BB
));
1307 MLX5_SET(wq
, wq
, log_wq_sz
, MLX5_GET(qpc
, qpc
, log_sq_size
));
1308 MLX5_SET(wq
, wq
, log_wq_pg_sz
, page_shift
- MLX5_ADAPTER_PAGE_SHIFT
);
1309 MLX5_SET(wq
, wq
, page_offset
, offset
);
1311 pas
= (__be64
*)MLX5_ADDR_OF(wq
, wq
, pas
);
1312 mlx5_ib_populate_pas(dev
, sq
->ubuffer
.umem
, page_shift
, pas
, 0);
1314 err
= mlx5_core_create_sq_tracked(dev
, in
, inlen
, &sq
->base
.mqp
);
1324 ib_umem_release(sq
->ubuffer
.umem
);
1325 sq
->ubuffer
.umem
= NULL
;
1330 static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev
*dev
,
1331 struct mlx5_ib_sq
*sq
)
1333 destroy_flow_rule_vport_sq(sq
);
1334 mlx5_core_destroy_sq_tracked(dev
, &sq
->base
.mqp
);
1335 ib_umem_release(sq
->ubuffer
.umem
);
1338 static size_t get_rq_pas_size(void *qpc
)
1340 u32 log_page_size
= MLX5_GET(qpc
, qpc
, log_page_size
) + 12;
1341 u32 log_rq_stride
= MLX5_GET(qpc
, qpc
, log_rq_stride
);
1342 u32 log_rq_size
= MLX5_GET(qpc
, qpc
, log_rq_size
);
1343 u32 page_offset
= MLX5_GET(qpc
, qpc
, page_offset
);
1344 u32 po_quanta
= 1 << (log_page_size
- 6);
1345 u32 rq_sz
= 1 << (log_rq_size
+ 4 + log_rq_stride
);
1346 u32 page_size
= 1 << log_page_size
;
1347 u32 rq_sz_po
= rq_sz
+ (page_offset
* po_quanta
);
1348 u32 rq_num_pas
= (rq_sz_po
+ page_size
- 1) / page_size
;
1350 return rq_num_pas
* sizeof(u64
);
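/*
 * Worked example (illustrative): with a qpc log_page_size field of 0 (4KB
 * pages after the +12 bias), log_rq_stride = 2, log_rq_size = 6 and
 * page_offset = 0, rq_sz = 1 << (6 + 4 + 2) = 4096 bytes, which fits in one
 * 4KB page and therefore needs a single 8-byte PAS entry.
 */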
1353 static int create_raw_packet_qp_rq(struct mlx5_ib_dev
*dev
,
1354 struct mlx5_ib_rq
*rq
, void *qpin
,
1355 size_t qpinlen
, struct ib_pd
*pd
)
1357 struct mlx5_ib_qp
*mqp
= rq
->base
.container_mibqp
;
1363 void *qpc
= MLX5_ADDR_OF(create_qp_in
, qpin
, qpc
);
1364 size_t rq_pas_size
= get_rq_pas_size(qpc
);
1368 if (qpinlen
< rq_pas_size
+ MLX5_BYTE_OFF(create_qp_in
, pas
))
1371 inlen
= MLX5_ST_SZ_BYTES(create_rq_in
) + rq_pas_size
;
1372 in
= kvzalloc(inlen
, GFP_KERNEL
);
1376 MLX5_SET(create_rq_in
, in
, uid
, to_mpd(pd
)->uid
);
1377 rqc
= MLX5_ADDR_OF(create_rq_in
, in
, ctx
);
1378 if (!(rq
->flags
& MLX5_IB_RQ_CVLAN_STRIPPING
))
1379 MLX5_SET(rqc
, rqc
, vsd
, 1);
1380 MLX5_SET(rqc
, rqc
, mem_rq_type
, MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE
);
1381 MLX5_SET(rqc
, rqc
, state
, MLX5_RQC_STATE_RST
);
1382 MLX5_SET(rqc
, rqc
, flush_in_error_en
, 1);
1383 MLX5_SET(rqc
, rqc
, user_index
, MLX5_GET(qpc
, qpc
, user_index
));
1384 MLX5_SET(rqc
, rqc
, cqn
, MLX5_GET(qpc
, qpc
, cqn_rcv
));
1386 if (mqp
->flags
& IB_QP_CREATE_SCATTER_FCS
)
1387 MLX5_SET(rqc
, rqc
, scatter_fcs
, 1);
1389 wq
= MLX5_ADDR_OF(rqc
, rqc
, wq
);
1390 MLX5_SET(wq
, wq
, wq_type
, MLX5_WQ_TYPE_CYCLIC
);
1391 if (rq
->flags
& MLX5_IB_RQ_PCI_WRITE_END_PADDING
)
1392 MLX5_SET(wq
, wq
, end_padding_mode
, MLX5_WQ_END_PAD_MODE_ALIGN
);
1393 MLX5_SET(wq
, wq
, page_offset
, MLX5_GET(qpc
, qpc
, page_offset
));
1394 MLX5_SET(wq
, wq
, pd
, MLX5_GET(qpc
, qpc
, pd
));
1395 MLX5_SET64(wq
, wq
, dbr_addr
, MLX5_GET64(qpc
, qpc
, dbr_addr
));
1396 MLX5_SET(wq
, wq
, log_wq_stride
, MLX5_GET(qpc
, qpc
, log_rq_stride
) + 4);
1397 MLX5_SET(wq
, wq
, log_wq_pg_sz
, MLX5_GET(qpc
, qpc
, log_page_size
));
1398 MLX5_SET(wq
, wq
, log_wq_sz
, MLX5_GET(qpc
, qpc
, log_rq_size
));
1400 pas
= (__be64
*)MLX5_ADDR_OF(wq
, wq
, pas
);
1401 qp_pas
= (__be64
*)MLX5_ADDR_OF(create_qp_in
, qpin
, pas
);
1402 memcpy(pas
, qp_pas
, rq_pas_size
);
1404 err
= mlx5_core_create_rq_tracked(dev
, in
, inlen
, &rq
->base
.mqp
);
1411 static void destroy_raw_packet_qp_rq(struct mlx5_ib_dev
*dev
,
1412 struct mlx5_ib_rq
*rq
)
1414 mlx5_core_destroy_rq_tracked(dev
, &rq
->base
.mqp
);
1417 static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev
*dev
,
1418 struct mlx5_ib_rq
*rq
,
1422 if (qp_flags_en
& (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC
|
1423 MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC
))
1424 mlx5_ib_disable_lb(dev
, false, true);
1425 mlx5_cmd_destroy_tir(dev
->mdev
, rq
->tirn
, to_mpd(pd
)->uid
);
1428 static int create_raw_packet_qp_tir(struct mlx5_ib_dev
*dev
,
1429 struct mlx5_ib_rq
*rq
, u32 tdn
,
1430 u32
*qp_flags_en
, struct ib_pd
*pd
,
1439 inlen
= MLX5_ST_SZ_BYTES(create_tir_in
);
1440 in
= kvzalloc(inlen
, GFP_KERNEL
);
1444 MLX5_SET(create_tir_in
, in
, uid
, to_mpd(pd
)->uid
);
1445 tirc
= MLX5_ADDR_OF(create_tir_in
, in
, ctx
);
1446 MLX5_SET(tirc
, tirc
, disp_type
, MLX5_TIRC_DISP_TYPE_DIRECT
);
1447 MLX5_SET(tirc
, tirc
, inline_rqn
, rq
->base
.mqp
.qpn
);
1448 MLX5_SET(tirc
, tirc
, transport_domain
, tdn
);
1449 if (*qp_flags_en
& MLX5_QP_FLAG_TUNNEL_OFFLOADS
)
1450 MLX5_SET(tirc
, tirc
, tunneled_offload_en
, 1);
1452 if (*qp_flags_en
& MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC
)
1453 lb_flag
|= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST
;
1455 if (*qp_flags_en
& MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC
)
1456 lb_flag
|= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST
;
1459 lb_flag
|= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST
;
1460 *qp_flags_en
|= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC
;
1463 MLX5_SET(tirc
, tirc
, self_lb_block
, lb_flag
);
1464 MLX5_SET(create_tir_in
, in
, opcode
, MLX5_CMD_OP_CREATE_TIR
);
1465 err
= mlx5_cmd_exec_inout(dev
->mdev
, create_tir
, in
, out
);
1466 rq
->tirn
= MLX5_GET(create_tir_out
, out
, tirn
);
1467 if (!err
&& MLX5_GET(tirc
, tirc
, self_lb_block
)) {
1468 err
= mlx5_ib_enable_lb(dev
, false, true);
1471 destroy_raw_packet_qp_tir(dev
, rq
, 0, pd
);
1478 static int create_raw_packet_qp(struct mlx5_ib_dev
*dev
, struct mlx5_ib_qp
*qp
,
1479 u32
*in
, size_t inlen
,
1481 struct ib_udata
*udata
,
1482 struct mlx5_ib_create_qp_resp
*resp
)
1484 struct mlx5_ib_raw_packet_qp
*raw_packet_qp
= &qp
->raw_packet_qp
;
1485 struct mlx5_ib_sq
*sq
= &raw_packet_qp
->sq
;
1486 struct mlx5_ib_rq
*rq
= &raw_packet_qp
->rq
;
1487 struct mlx5_ib_ucontext
*mucontext
= rdma_udata_to_drv_context(
1488 udata
, struct mlx5_ib_ucontext
, ibucontext
);
1490 u32 tdn
= mucontext
->tdn
;
1491 u16 uid
= to_mpd(pd
)->uid
;
1492 u32 out
[MLX5_ST_SZ_DW(create_tir_out
)] = {};
1494 if (qp
->sq
.wqe_cnt
) {
1495 err
= create_raw_packet_qp_tis(dev
, qp
, sq
, tdn
, pd
);
1499 err
= create_raw_packet_qp_sq(dev
, udata
, sq
, in
, pd
);
1501 goto err_destroy_tis
;
1504 resp
->tisn
= sq
->tisn
;
1505 resp
->comp_mask
|= MLX5_IB_CREATE_QP_RESP_MASK_TISN
;
1506 resp
->sqn
= sq
->base
.mqp
.qpn
;
1507 resp
->comp_mask
|= MLX5_IB_CREATE_QP_RESP_MASK_SQN
;
1510 sq
->base
.container_mibqp
= qp
;
1511 sq
->base
.mqp
.event
= mlx5_ib_qp_event
;
1514 if (qp
->rq
.wqe_cnt
) {
1515 rq
->base
.container_mibqp
= qp
;
1517 if (qp
->flags
& IB_QP_CREATE_CVLAN_STRIPPING
)
1518 rq
->flags
|= MLX5_IB_RQ_CVLAN_STRIPPING
;
1519 if (qp
->flags
& IB_QP_CREATE_PCI_WRITE_END_PADDING
)
1520 rq
->flags
|= MLX5_IB_RQ_PCI_WRITE_END_PADDING
;
1521 err
= create_raw_packet_qp_rq(dev
, rq
, in
, inlen
, pd
);
1523 goto err_destroy_sq
;
1525 err
= create_raw_packet_qp_tir(dev
, rq
, tdn
, &qp
->flags_en
, pd
,
1528 goto err_destroy_rq
;
1531 resp
->rqn
= rq
->base
.mqp
.qpn
;
1532 resp
->comp_mask
|= MLX5_IB_CREATE_QP_RESP_MASK_RQN
;
1533 resp
->tirn
= rq
->tirn
;
1534 resp
->comp_mask
|= MLX5_IB_CREATE_QP_RESP_MASK_TIRN
;
1535 if (MLX5_CAP_FLOWTABLE_NIC_RX(dev
->mdev
, sw_owner
)) {
1536 resp
->tir_icm_addr
= MLX5_GET(
1537 create_tir_out
, out
, icm_address_31_0
);
1538 resp
->tir_icm_addr
|=
1539 (u64
)MLX5_GET(create_tir_out
, out
,
1542 resp
->tir_icm_addr
|=
1543 (u64
)MLX5_GET(create_tir_out
, out
,
1547 MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR
;
1552 qp
->trans_qp
.base
.mqp
.qpn
= qp
->sq
.wqe_cnt
? sq
->base
.mqp
.qpn
:
1554 err
= ib_copy_to_udata(udata
, resp
, min(udata
->outlen
, sizeof(*resp
)));
1556 goto err_destroy_tir
;
1561 destroy_raw_packet_qp_tir(dev
, rq
, qp
->flags_en
, pd
);
1563 destroy_raw_packet_qp_rq(dev
, rq
);
1565 if (!qp
->sq
.wqe_cnt
)
1567 destroy_raw_packet_qp_sq(dev
, sq
);
1569 destroy_raw_packet_qp_tis(dev
, sq
, pd
);
1574 static void destroy_raw_packet_qp(struct mlx5_ib_dev
*dev
,
1575 struct mlx5_ib_qp
*qp
)
1577 struct mlx5_ib_raw_packet_qp
*raw_packet_qp
= &qp
->raw_packet_qp
;
1578 struct mlx5_ib_sq
*sq
= &raw_packet_qp
->sq
;
1579 struct mlx5_ib_rq
*rq
= &raw_packet_qp
->rq
;
1581 if (qp
->rq
.wqe_cnt
) {
1582 destroy_raw_packet_qp_tir(dev
, rq
, qp
->flags_en
, qp
->ibqp
.pd
);
1583 destroy_raw_packet_qp_rq(dev
, rq
);
1586 if (qp
->sq
.wqe_cnt
) {
1587 destroy_raw_packet_qp_sq(dev
, sq
);
1588 destroy_raw_packet_qp_tis(dev
, sq
, qp
->ibqp
.pd
);
1592 static void raw_packet_qp_copy_info(struct mlx5_ib_qp
*qp
,
1593 struct mlx5_ib_raw_packet_qp
*raw_packet_qp
)
1595 struct mlx5_ib_sq
*sq
= &raw_packet_qp
->sq
;
1596 struct mlx5_ib_rq
*rq
= &raw_packet_qp
->rq
;
1600 sq
->doorbell
= &qp
->db
;
1601 rq
->doorbell
= &qp
->db
;
1604 static void destroy_rss_raw_qp_tir(struct mlx5_ib_dev
*dev
, struct mlx5_ib_qp
*qp
)
1606 if (qp
->flags_en
& (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC
|
1607 MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC
))
1608 mlx5_ib_disable_lb(dev
, false, true);
1609 mlx5_cmd_destroy_tir(dev
->mdev
, qp
->rss_qp
.tirn
,
1610 to_mpd(qp
->ibqp
.pd
)->uid
);
1613 struct mlx5_create_qp_params
{
1614 struct ib_udata
*udata
;
1618 struct ib_qp_init_attr
*attr
;
1622 static int create_rss_raw_qp_tir(struct mlx5_ib_dev
*dev
, struct ib_pd
*pd
,
1623 struct mlx5_ib_qp
*qp
,
1624 struct mlx5_create_qp_params
*params
)
1626 struct ib_qp_init_attr
*init_attr
= params
->attr
;
1627 struct mlx5_ib_create_qp_rss
*ucmd
= params
->ucmd
;
1628 struct ib_udata
*udata
= params
->udata
;
1629 struct mlx5_ib_ucontext
*mucontext
= rdma_udata_to_drv_context(
1630 udata
, struct mlx5_ib_ucontext
, ibucontext
);
1631 struct mlx5_ib_create_qp_resp resp
= {};
1639 u32 selected_fields
= 0;
1641 size_t min_resp_len
;
1642 u32 tdn
= mucontext
->tdn
;
1646 offsetof(typeof(resp
), bfreg_index
) + sizeof(resp
.bfreg_index
);
1647 if (udata
->outlen
< min_resp_len
)
1650 if (ucmd
->comp_mask
) {
1651 mlx5_ib_dbg(dev
, "invalid comp mask\n");
1655 if (ucmd
->rx_hash_fields_mask
& MLX5_RX_HASH_INNER
&&
1656 !(ucmd
->flags
& MLX5_QP_FLAG_TUNNEL_OFFLOADS
)) {
1657 mlx5_ib_dbg(dev
, "Tunnel offloads must be set for inner RSS\n");
1662 qp
->flags_en
|= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC
;
1664 if (qp
->flags_en
& MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC
)
1665 lb_flag
|= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST
;
1667 if (qp
->flags_en
& MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC
)
1668 lb_flag
|= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST
;
1670 err
= ib_copy_to_udata(udata
, &resp
, min(udata
->outlen
, sizeof(resp
)));
1672 mlx5_ib_dbg(dev
, "copy failed\n");
1676 inlen
= MLX5_ST_SZ_BYTES(create_tir_in
);
1677 outlen
= MLX5_ST_SZ_BYTES(create_tir_out
);
1678 in
= kvzalloc(inlen
+ outlen
, GFP_KERNEL
);
1682 out
= in
+ MLX5_ST_SZ_DW(create_tir_in
);
1683 MLX5_SET(create_tir_in
, in
, uid
, to_mpd(pd
)->uid
);
1684 tirc
= MLX5_ADDR_OF(create_tir_in
, in
, ctx
);
1685 MLX5_SET(tirc
, tirc
, disp_type
,
1686 MLX5_TIRC_DISP_TYPE_INDIRECT
);
1687 MLX5_SET(tirc
, tirc
, indirect_table
,
1688 init_attr
->rwq_ind_tbl
->ind_tbl_num
);
1689 MLX5_SET(tirc
, tirc
, transport_domain
, tdn
);
1691 hfso
= MLX5_ADDR_OF(tirc
, tirc
, rx_hash_field_selector_outer
);
1693 if (ucmd
->flags
& MLX5_QP_FLAG_TUNNEL_OFFLOADS
)
1694 MLX5_SET(tirc
, tirc
, tunneled_offload_en
, 1);
1696 MLX5_SET(tirc
, tirc
, self_lb_block
, lb_flag
);
1698 if (ucmd
->rx_hash_fields_mask
& MLX5_RX_HASH_INNER
)
1699 hfso
= MLX5_ADDR_OF(tirc
, tirc
, rx_hash_field_selector_inner
);
1701 hfso
= MLX5_ADDR_OF(tirc
, tirc
, rx_hash_field_selector_outer
);
1703 switch (ucmd
->rx_hash_function
) {
1704 case MLX5_RX_HASH_FUNC_TOEPLITZ
:
1706 void *rss_key
= MLX5_ADDR_OF(tirc
, tirc
, rx_hash_toeplitz_key
);
1707 size_t len
= MLX5_FLD_SZ_BYTES(tirc
, rx_hash_toeplitz_key
);
1709 if (len
!= ucmd
->rx_key_len
) {
1714 MLX5_SET(tirc
, tirc
, rx_hash_fn
, MLX5_RX_HASH_FN_TOEPLITZ
);
1715 memcpy(rss_key
, ucmd
->rx_hash_key
, len
);
1723 if (!ucmd
->rx_hash_fields_mask
) {
1724 /* special case when this TIR serves as steering entry without hashing */
1725 if (!init_attr
->rwq_ind_tbl
->log_ind_tbl_size
)
1731 if (((ucmd
->rx_hash_fields_mask
& MLX5_RX_HASH_SRC_IPV4
) ||
1732 (ucmd
->rx_hash_fields_mask
& MLX5_RX_HASH_DST_IPV4
)) &&
1733 ((ucmd
->rx_hash_fields_mask
& MLX5_RX_HASH_SRC_IPV6
) ||
1734 (ucmd
->rx_hash_fields_mask
& MLX5_RX_HASH_DST_IPV6
))) {
1739 /* If none of IPV4 & IPV6 SRC/DST was set - this bit field is ignored */
1740 if ((ucmd
->rx_hash_fields_mask
& MLX5_RX_HASH_SRC_IPV4
) ||
1741 (ucmd
->rx_hash_fields_mask
& MLX5_RX_HASH_DST_IPV4
))
1742 MLX5_SET(rx_hash_field_select
, hfso
, l3_prot_type
,
1743 MLX5_L3_PROT_TYPE_IPV4
);
1744 else if ((ucmd
->rx_hash_fields_mask
& MLX5_RX_HASH_SRC_IPV6
) ||
1745 (ucmd
->rx_hash_fields_mask
& MLX5_RX_HASH_DST_IPV6
))
1746 MLX5_SET(rx_hash_field_select
, hfso
, l3_prot_type
,
1747 MLX5_L3_PROT_TYPE_IPV6
);
1749 outer_l4
= ((ucmd
->rx_hash_fields_mask
& MLX5_RX_HASH_SRC_PORT_TCP
) ||
1750 (ucmd
->rx_hash_fields_mask
& MLX5_RX_HASH_DST_PORT_TCP
))
1752 ((ucmd
->rx_hash_fields_mask
& MLX5_RX_HASH_SRC_PORT_UDP
) ||
1753 (ucmd
->rx_hash_fields_mask
& MLX5_RX_HASH_DST_PORT_UDP
))
1755 (ucmd
->rx_hash_fields_mask
& MLX5_RX_HASH_IPSEC_SPI
) << 2;
1757 /* Check that only one l4 protocol is set */
1758 if (outer_l4
& (outer_l4
- 1)) {
1763 /* If none of TCP & UDP SRC/DST was set - this bit field is ignored */
1764 if ((ucmd
->rx_hash_fields_mask
& MLX5_RX_HASH_SRC_PORT_TCP
) ||
1765 (ucmd
->rx_hash_fields_mask
& MLX5_RX_HASH_DST_PORT_TCP
))
1766 MLX5_SET(rx_hash_field_select
, hfso
, l4_prot_type
,
1767 MLX5_L4_PROT_TYPE_TCP
);
1768 else if ((ucmd
->rx_hash_fields_mask
& MLX5_RX_HASH_SRC_PORT_UDP
) ||
1769 (ucmd
->rx_hash_fields_mask
& MLX5_RX_HASH_DST_PORT_UDP
))
1770 MLX5_SET(rx_hash_field_select
, hfso
, l4_prot_type
,
1771 MLX5_L4_PROT_TYPE_UDP
);
1773 if ((ucmd
->rx_hash_fields_mask
& MLX5_RX_HASH_SRC_IPV4
) ||
1774 (ucmd
->rx_hash_fields_mask
& MLX5_RX_HASH_SRC_IPV6
))
1775 selected_fields
|= MLX5_HASH_FIELD_SEL_SRC_IP
;
1777 if ((ucmd
->rx_hash_fields_mask
& MLX5_RX_HASH_DST_IPV4
) ||
1778 (ucmd
->rx_hash_fields_mask
& MLX5_RX_HASH_DST_IPV6
))
1779 selected_fields
|= MLX5_HASH_FIELD_SEL_DST_IP
;
1781 if ((ucmd
->rx_hash_fields_mask
& MLX5_RX_HASH_SRC_PORT_TCP
) ||
1782 (ucmd
->rx_hash_fields_mask
& MLX5_RX_HASH_SRC_PORT_UDP
))
1783 selected_fields
|= MLX5_HASH_FIELD_SEL_L4_SPORT
;
1785 if ((ucmd
->rx_hash_fields_mask
& MLX5_RX_HASH_DST_PORT_TCP
) ||
1786 (ucmd
->rx_hash_fields_mask
& MLX5_RX_HASH_DST_PORT_UDP
))
1787 selected_fields
|= MLX5_HASH_FIELD_SEL_L4_DPORT
;
1789 if (ucmd
->rx_hash_fields_mask
& MLX5_RX_HASH_IPSEC_SPI
)
1790 selected_fields
|= MLX5_HASH_FIELD_SEL_IPSEC_SPI
;
1792 MLX5_SET(rx_hash_field_select
, hfso
, selected_fields
, selected_fields
);
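/*
 * Worked example (illustrative): a user requesting MLX5_RX_HASH_SRC_IPV4 |
 * MLX5_RX_HASH_DST_IPV4 | MLX5_RX_HASH_SRC_PORT_TCP |
 * MLX5_RX_HASH_DST_PORT_TCP ends up with l3_prot_type = IPV4,
 * l4_prot_type = TCP and selected_fields = SRC_IP | DST_IP | L4_SPORT |
 * L4_DPORT.
 */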
1795 MLX5_SET(create_tir_in
, in
, opcode
, MLX5_CMD_OP_CREATE_TIR
);
1796 err
= mlx5_cmd_exec_inout(dev
->mdev
, create_tir
, in
, out
);
1798 qp
->rss_qp
.tirn
= MLX5_GET(create_tir_out
, out
, tirn
);
1799 if (!err
&& MLX5_GET(tirc
, tirc
, self_lb_block
)) {
1800 err
= mlx5_ib_enable_lb(dev
, false, true);
1803 mlx5_cmd_destroy_tir(dev
->mdev
, qp
->rss_qp
.tirn
,
1810 if (mucontext
->devx_uid
) {
1811 resp
.comp_mask
|= MLX5_IB_CREATE_QP_RESP_MASK_TIRN
;
1812 resp
.tirn
= qp
->rss_qp
.tirn
;
1813 if (MLX5_CAP_FLOWTABLE_NIC_RX(dev
->mdev
, sw_owner
)) {
1815 MLX5_GET(create_tir_out
, out
, icm_address_31_0
);
1816 resp
.tir_icm_addr
|= (u64
)MLX5_GET(create_tir_out
, out
,
1819 resp
.tir_icm_addr
|= (u64
)MLX5_GET(create_tir_out
, out
,
1823 MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR
;
1827 err
= ib_copy_to_udata(udata
, &resp
, min(udata
->outlen
, sizeof(resp
)));
1832 /* qpn is reserved for that QP */
1833 qp
->trans_qp
.base
.mqp
.qpn
= 0;
1838 mlx5_cmd_destroy_tir(dev
->mdev
, qp
->rss_qp
.tirn
, mucontext
->devx_uid
);
1844 static void configure_requester_scat_cqe(struct mlx5_ib_dev
*dev
,
1845 struct ib_qp_init_attr
*init_attr
,
1846 struct mlx5_ib_create_qp
*ucmd
,
1850 bool allow_scat_cqe
= false;
1853 allow_scat_cqe
= ucmd
->flags
& MLX5_QP_FLAG_ALLOW_SCATTER_CQE
;
1855 if (!allow_scat_cqe
&& init_attr
->sq_sig_type
!= IB_SIGNAL_ALL_WR
)
1858 scqe_sz
= mlx5_ib_get_cqe_size(init_attr
->send_cq
);
1859 if (scqe_sz
== 128) {
1860 MLX5_SET(qpc
, qpc
, cs_req
, MLX5_REQ_SCAT_DATA64_CQE
);
1864 if (init_attr
->qp_type
!= MLX5_IB_QPT_DCI
||
1865 MLX5_CAP_GEN(dev
->mdev
, dc_req_scat_data_cqe
))
1866 MLX5_SET(qpc
, qpc
, cs_req
, MLX5_REQ_SCAT_DATA32_CQE
);
1869 static int atomic_size_to_mode(int size_mask
)
1871 /* driver does not support atomic_size > 256B
1872 * and does not know how to translate bigger sizes
1874 int supported_size_mask
= size_mask
& 0x1ff;
1877 if (!supported_size_mask
)
1880 log_max_size
= __fls(supported_size_mask
);
1882 if (log_max_size
> 3)
1883 return log_max_size
;
1885 return MLX5_ATOMIC_MODE_8B
;
1888 static int get_atomic_mode(struct mlx5_ib_dev
*dev
,
1889 enum ib_qp_type qp_type
)
1891 u8 atomic_operations
= MLX5_CAP_ATOMIC(dev
->mdev
, atomic_operations
);
1892 u8 atomic
= MLX5_CAP_GEN(dev
->mdev
, atomic
);
1893 int atomic_mode
= -EOPNOTSUPP
;
1894 int atomic_size_mask
;
1899 if (qp_type
== MLX5_IB_QPT_DCT
)
1900 atomic_size_mask
= MLX5_CAP_ATOMIC(dev
->mdev
, atomic_size_dc
);
1902 atomic_size_mask
= MLX5_CAP_ATOMIC(dev
->mdev
, atomic_size_qp
);
1904 if ((atomic_operations
& MLX5_ATOMIC_OPS_EXTENDED_CMP_SWAP
) ||
1905 (atomic_operations
& MLX5_ATOMIC_OPS_EXTENDED_FETCH_ADD
))
1906 atomic_mode
= atomic_size_to_mode(atomic_size_mask
);
1908 if (atomic_mode
<= 0 &&
1909 (atomic_operations
& MLX5_ATOMIC_OPS_CMP_SWAP
&&
1910 atomic_operations
& MLX5_ATOMIC_OPS_FETCH_ADD
))
1911 atomic_mode
= MLX5_ATOMIC_MODE_IB_COMP
;
1916 static int create_xrc_tgt_qp(struct mlx5_ib_dev
*dev
, struct mlx5_ib_qp
*qp
,
1917 struct mlx5_create_qp_params
*params
)
1919 struct ib_qp_init_attr
*attr
= params
->attr
;
1920 struct ib_udata
*udata
= params
->udata
;
1921 u32 uidx
= params
->uidx
;
1922 struct mlx5_ib_resources
*devr
= &dev
->devr
;
1923 int inlen
= MLX5_ST_SZ_BYTES(create_qp_in
);
1924 struct mlx5_core_dev
*mdev
= dev
->mdev
;
1925 struct mlx5_ib_qp_base
*base
;
1926 unsigned long flags
;
1931 mutex_init(&qp
->mutex
);
1933 if (attr
->sq_sig_type
== IB_SIGNAL_ALL_WR
)
1934 qp
->sq_signal_bits
= MLX5_WQE_CTRL_CQ_UPDATE
;
1936 in
= kvzalloc(inlen
, GFP_KERNEL
);
1940 qpc
= MLX5_ADDR_OF(create_qp_in
, in
, qpc
);
1942 MLX5_SET(qpc
, qpc
, st
, MLX5_QP_ST_XRC
);
1943 MLX5_SET(qpc
, qpc
, pm_state
, MLX5_QP_PM_MIGRATED
);
1944 MLX5_SET(qpc
, qpc
, pd
, to_mpd(devr
->p0
)->pdn
);
1946 if (qp
->flags
& IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK
)
1947 MLX5_SET(qpc
, qpc
, block_lb_mc
, 1);
1948 if (qp
->flags
& IB_QP_CREATE_CROSS_CHANNEL
)
1949 MLX5_SET(qpc
, qpc
, cd_master
, 1);
1950 if (qp
->flags
& IB_QP_CREATE_MANAGED_SEND
)
1951 MLX5_SET(qpc
, qpc
, cd_slave_send
, 1);
1952 if (qp
->flags
& IB_QP_CREATE_MANAGED_RECV
)
1953 MLX5_SET(qpc
, qpc
, cd_slave_receive
, 1);
1955 MLX5_SET(qpc
, qpc
, rq_type
, MLX5_SRQ_RQ
);
1956 MLX5_SET(qpc
, qpc
, no_sq
, 1);
1957 MLX5_SET(qpc
, qpc
, cqn_rcv
, to_mcq(devr
->c0
)->mcq
.cqn
);
1958 MLX5_SET(qpc
, qpc
, cqn_snd
, to_mcq(devr
->c0
)->mcq
.cqn
);
1959 MLX5_SET(qpc
, qpc
, srqn_rmpn_xrqn
, to_msrq(devr
->s0
)->msrq
.srqn
);
1960 MLX5_SET(qpc
, qpc
, xrcd
, to_mxrcd(attr
->xrcd
)->xrcdn
);
1961 MLX5_SET64(qpc
, qpc
, dbr_addr
, qp
->db
.dma
);
1963 /* 0xffffff means we ask to work with cqe version 0 */
1964 if (MLX5_CAP_GEN(mdev
, cqe_version
) == MLX5_CQE_VERSION_V1
)
1965 MLX5_SET(qpc
, qpc
, user_index
, uidx
);
1967 if (qp
->flags
& IB_QP_CREATE_PCI_WRITE_END_PADDING
) {
1968 MLX5_SET(qpc
, qpc
, end_padding_mode
,
1969 MLX5_WQ_END_PAD_MODE_ALIGN
);
1970 /* Special case to clean flag */
1971 qp
->flags
&= ~IB_QP_CREATE_PCI_WRITE_END_PADDING
;
1974 base
= &qp
->trans_qp
.base
;
1975 err
= mlx5_core_create_qp(dev
, &base
->mqp
, in
, inlen
);
1978 destroy_qp(dev
, qp
, base
, udata
);
1982 base
->container_mibqp
= qp
;
1983 base
->mqp
.event
= mlx5_ib_qp_event
;
1985 spin_lock_irqsave(&dev
->reset_flow_resource_lock
, flags
);
1986 list_add_tail(&qp
->qps_list
, &dev
->qp_list
);
1987 spin_unlock_irqrestore(&dev
->reset_flow_resource_lock
, flags
);
1992 static int create_user_qp(struct mlx5_ib_dev
*dev
, struct ib_pd
*pd
,
1993 struct mlx5_ib_qp
*qp
,
1994 struct mlx5_create_qp_params
*params
)
1996 struct ib_qp_init_attr
*init_attr
= params
->attr
;
1997 struct mlx5_ib_create_qp
*ucmd
= params
->ucmd
;
1998 struct ib_udata
*udata
= params
->udata
;
1999 u32 uidx
= params
->uidx
;
2000 struct mlx5_ib_resources
*devr
= &dev
->devr
;
2001 int inlen
= MLX5_ST_SZ_BYTES(create_qp_in
);
2002 struct mlx5_core_dev
*mdev
= dev
->mdev
;
2003 struct mlx5_ib_create_qp_resp resp
= {};
2004 struct mlx5_ib_cq
*send_cq
;
2005 struct mlx5_ib_cq
*recv_cq
;
2006 unsigned long flags
;
2007 struct mlx5_ib_qp_base
*base
;
2013 mutex_init(&qp
->mutex
);
2014 spin_lock_init(&qp
->sq
.lock
);
2015 spin_lock_init(&qp
->rq
.lock
);
2017 mlx5_st
= to_mlx5_st(qp
->type
);
2021 if (init_attr
->sq_sig_type
== IB_SIGNAL_ALL_WR
)
2022 qp
->sq_signal_bits
= MLX5_WQE_CTRL_CQ_UPDATE
;
2024 if (qp
->flags
& IB_QP_CREATE_SOURCE_QPN
)
2025 qp
->underlay_qpn
= init_attr
->source_qpn
;
2027 base
= (init_attr
->qp_type
== IB_QPT_RAW_PACKET
||
2028 qp
->flags
& IB_QP_CREATE_SOURCE_QPN
) ?
2029 &qp
->raw_packet_qp
.rq
.base
:
2032 qp
->has_rq
= qp_has_rq(init_attr
);
2033 err
= set_rq_size(dev
, &init_attr
->cap
, qp
->has_rq
, qp
, ucmd
);
2035 mlx5_ib_dbg(dev
, "err %d\n", err
);
2039 if (ucmd
->rq_wqe_shift
!= qp
->rq
.wqe_shift
||
2040 ucmd
->rq_wqe_count
!= qp
->rq
.wqe_cnt
)
2043 if (ucmd
->sq_wqe_count
> (1 << MLX5_CAP_GEN(mdev
, log_max_qp_sz
)))
2046 err
= _create_user_qp(dev
, pd
, qp
, udata
, init_attr
, &in
, &resp
, &inlen
,
2051 if (is_sqp(init_attr
->qp_type
))
2052 qp
->port
= init_attr
->port_num
;
2054 qpc
= MLX5_ADDR_OF(create_qp_in
, in
, qpc
);
2056 MLX5_SET(qpc
, qpc
, st
, mlx5_st
);
2057 MLX5_SET(qpc
, qpc
, pm_state
, MLX5_QP_PM_MIGRATED
);
2058 MLX5_SET(qpc
, qpc
, pd
, to_mpd(pd
)->pdn
);
2060 if (qp
->flags_en
& MLX5_QP_FLAG_SIGNATURE
)
2061 MLX5_SET(qpc
, qpc
, wq_signature
, 1);
2063 if (qp
->flags
& IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK
)
2064 MLX5_SET(qpc
, qpc
, block_lb_mc
, 1);
2066 if (qp
->flags
& IB_QP_CREATE_CROSS_CHANNEL
)
2067 MLX5_SET(qpc
, qpc
, cd_master
, 1);
2068 if (qp
->flags
& IB_QP_CREATE_MANAGED_SEND
)
2069 MLX5_SET(qpc
, qpc
, cd_slave_send
, 1);
2070 if (qp
->flags
& IB_QP_CREATE_MANAGED_RECV
)
2071 MLX5_SET(qpc
, qpc
, cd_slave_receive
, 1);
2072 if (qp
->flags_en
& MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE
)
2073 MLX5_SET(qpc
, qpc
, req_e2e_credit_mode
, 1);
2074 if ((qp
->flags_en
& MLX5_QP_FLAG_SCATTER_CQE
) &&
2075 (init_attr
->qp_type
== IB_QPT_RC
||
2076 init_attr
->qp_type
== IB_QPT_UC
)) {
int rcqe_sz = mlx5_ib_get_cqe_size(init_attr->recv_cq);
2080 MLX5_SET(qpc
, qpc
, cs_res
,
2081 rcqe_sz
== 128 ? MLX5_RES_SCAT_DATA64_CQE
:
2082 MLX5_RES_SCAT_DATA32_CQE
);
2084 if ((qp
->flags_en
& MLX5_QP_FLAG_SCATTER_CQE
) &&
2085 (qp
->type
== MLX5_IB_QPT_DCI
|| qp
->type
== IB_QPT_RC
))
2086 configure_requester_scat_cqe(dev
, init_attr
, ucmd
, qpc
);
2088 if (qp
->rq
.wqe_cnt
) {
2089 MLX5_SET(qpc
, qpc
, log_rq_stride
, qp
->rq
.wqe_shift
- 4);
2090 MLX5_SET(qpc
, qpc
, log_rq_size
, ilog2(qp
->rq
.wqe_cnt
));
2093 MLX5_SET(qpc
, qpc
, rq_type
, get_rx_type(qp
, init_attr
));
2095 if (qp
->sq
.wqe_cnt
) {
2096 MLX5_SET(qpc
, qpc
, log_sq_size
, ilog2(qp
->sq
.wqe_cnt
));
2098 MLX5_SET(qpc
, qpc
, no_sq
, 1);
2099 if (init_attr
->srq
&&
2100 init_attr
->srq
->srq_type
== IB_SRQT_TM
)
2101 MLX5_SET(qpc
, qpc
, offload_type
,
2102 MLX5_QPC_OFFLOAD_TYPE_RNDV
);
2105 /* Set default resources */
2106 switch (init_attr
->qp_type
) {
2107 case IB_QPT_XRC_INI
:
2108 MLX5_SET(qpc
, qpc
, cqn_rcv
, to_mcq(devr
->c0
)->mcq
.cqn
);
2109 MLX5_SET(qpc
, qpc
, xrcd
, to_mxrcd(devr
->x1
)->xrcdn
);
2110 MLX5_SET(qpc
, qpc
, srqn_rmpn_xrqn
, to_msrq(devr
->s0
)->msrq
.srqn
);
2113 if (init_attr
->srq
) {
2114 MLX5_SET(qpc
, qpc
, xrcd
, to_mxrcd(devr
->x0
)->xrcdn
);
2115 MLX5_SET(qpc
, qpc
, srqn_rmpn_xrqn
, to_msrq(init_attr
->srq
)->msrq
.srqn
);
2117 MLX5_SET(qpc
, qpc
, xrcd
, to_mxrcd(devr
->x1
)->xrcdn
);
2118 MLX5_SET(qpc
, qpc
, srqn_rmpn_xrqn
, to_msrq(devr
->s1
)->msrq
.srqn
);
2122 if (init_attr
->send_cq
)
2123 MLX5_SET(qpc
, qpc
, cqn_snd
, to_mcq(init_attr
->send_cq
)->mcq
.cqn
);
2125 if (init_attr
->recv_cq
)
2126 MLX5_SET(qpc
, qpc
, cqn_rcv
, to_mcq(init_attr
->recv_cq
)->mcq
.cqn
);
2128 MLX5_SET64(qpc
, qpc
, dbr_addr
, qp
->db
.dma
);
2130 /* 0xffffff means we ask to work with cqe version 0 */
2131 if (MLX5_CAP_GEN(mdev
, cqe_version
) == MLX5_CQE_VERSION_V1
)
2132 MLX5_SET(qpc
, qpc
, user_index
, uidx
);
2134 if (qp
->flags
& IB_QP_CREATE_PCI_WRITE_END_PADDING
&&
2135 init_attr
->qp_type
!= IB_QPT_RAW_PACKET
) {
2136 MLX5_SET(qpc
, qpc
, end_padding_mode
,
2137 MLX5_WQ_END_PAD_MODE_ALIGN
);
2138 /* Special case to clean flag */
2139 qp
->flags
&= ~IB_QP_CREATE_PCI_WRITE_END_PADDING
;
2142 if (init_attr
->qp_type
== IB_QPT_RAW_PACKET
||
2143 qp
->flags
& IB_QP_CREATE_SOURCE_QPN
) {
2144 qp
->raw_packet_qp
.sq
.ubuffer
.buf_addr
= ucmd
->sq_buf_addr
;
2145 raw_packet_qp_copy_info(qp
, &qp
->raw_packet_qp
);
2146 err
= create_raw_packet_qp(dev
, qp
, in
, inlen
, pd
, udata
,
2149 err
= mlx5_core_create_qp(dev
, &base
->mqp
, in
, inlen
);
2155 base
->container_mibqp
= qp
;
2156 base
->mqp
.event
= mlx5_ib_qp_event
;
2158 get_cqs(qp
->type
, init_attr
->send_cq
, init_attr
->recv_cq
,
2159 &send_cq
, &recv_cq
);
2160 spin_lock_irqsave(&dev
->reset_flow_resource_lock
, flags
);
2161 mlx5_ib_lock_cqs(send_cq
, recv_cq
);
2162 /* Maintain device to QPs access, needed for further handling via reset
2165 list_add_tail(&qp
->qps_list
, &dev
->qp_list
);
2166 /* Maintain CQ to QPs access, needed for further handling via reset flow
2169 list_add_tail(&qp
->cq_send_list
, &send_cq
->list_send_qp
);
2171 list_add_tail(&qp
->cq_recv_list
, &recv_cq
->list_recv_qp
);
2172 mlx5_ib_unlock_cqs(send_cq
, recv_cq
);
2173 spin_unlock_irqrestore(&dev
->reset_flow_resource_lock
, flags
);
2178 destroy_qp(dev
, qp
, base
, udata
);
static int create_kernel_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			    struct mlx5_ib_qp *qp,
			    struct mlx5_create_qp_params *params)
{
	struct ib_qp_init_attr *attr = params->attr;
	u32 uidx = params->uidx;
	struct mlx5_ib_resources *devr = &dev->devr;
	int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_cq *send_cq;
	struct mlx5_ib_cq *recv_cq;
	unsigned long flags;
	struct mlx5_ib_qp_base *base;
	int mlx5_st;
	void *qpc;
	u32 *in;
	int err;

	mutex_init(&qp->mutex);
	spin_lock_init(&qp->sq.lock);
	spin_lock_init(&qp->rq.lock);

	mlx5_st = to_mlx5_st(qp->type);
	if (mlx5_st < 0)
		return -EINVAL;

	if (attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;

	base = &qp->trans_qp.base;

	qp->has_rq = qp_has_rq(attr);
	err = set_rq_size(dev, &attr->cap, qp->has_rq, qp, NULL);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		return err;
	}

	err = _create_kernel_qp(dev, attr, qp, &in, &inlen, base);
	if (err)
		return err;

	if (is_sqp(attr->qp_type))
		qp->port = attr->port_num;

	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);

	MLX5_SET(qpc, qpc, st, mlx5_st);
	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);

	if (attr->qp_type != MLX5_IB_QPT_REG_UMR)
		MLX5_SET(qpc, qpc, pd, to_mpd(pd ? pd : devr->p0)->pdn);
	else
		MLX5_SET(qpc, qpc, latency_sensitive, 1);

	if (qp->flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
		MLX5_SET(qpc, qpc, block_lb_mc, 1);

	if (qp->rq.wqe_cnt) {
		MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
		MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
	}

	MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, attr));

	if (qp->sq.wqe_cnt)
		MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt));
	else
		MLX5_SET(qpc, qpc, no_sq, 1);

	if (attr->srq) {
		MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x0)->xrcdn);
		MLX5_SET(qpc, qpc, srqn_rmpn_xrqn,
			 to_msrq(attr->srq)->msrq.srqn);
	} else {
		MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn);
		MLX5_SET(qpc, qpc, srqn_rmpn_xrqn,
			 to_msrq(devr->s1)->msrq.srqn);
	}

	if (attr->send_cq)
		MLX5_SET(qpc, qpc, cqn_snd, to_mcq(attr->send_cq)->mcq.cqn);

	if (attr->recv_cq)
		MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(attr->recv_cq)->mcq.cqn);

	MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);

	/* 0xffffff means we ask to work with cqe version 0 */
	if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
		MLX5_SET(qpc, qpc, user_index, uidx);

	/* we use IB_QP_CREATE_IPOIB_UD_LSO to indicates ipoib qp */
	if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO)
		MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1);

	err = mlx5_core_create_qp(dev, &base->mqp, in, inlen);
	kvfree(in);
	if (err)
		goto err_create;

	base->container_mibqp = qp;
	base->mqp.event = mlx5_ib_qp_event;

	get_cqs(qp->type, attr->send_cq, attr->recv_cq,
		&send_cq, &recv_cq);
	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
	mlx5_ib_lock_cqs(send_cq, recv_cq);
	/* Maintain device to QPs access, needed for further handling via reset
	 * flow
	 */
	list_add_tail(&qp->qps_list, &dev->qp_list);
	/* Maintain CQ to QPs access, needed for further handling via reset flow
	 */
	if (send_cq)
		list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp);
	if (recv_cq)
		list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
	mlx5_ib_unlock_cqs(send_cq, recv_cq);
	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);

	return 0;

err_create:
	destroy_qp(dev, qp, base, NULL);
	return err;
}

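/*
 * The two helpers below take both CQ locks for the reset flow. To avoid an
 * ABBA deadlock when the send and receive CQs differ, the CQ with the lower
 * CQN is always locked first; when both arguments point at the same CQ only
 * one real lock is taken and the second acquisition is only annotated for
 * sparse via __acquire()/__release().
 */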
static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq) {
		if (recv_cq) {
			if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
				spin_lock(&send_cq->lock);
				spin_lock_nested(&recv_cq->lock,
						 SINGLE_DEPTH_NESTING);
			} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
				spin_lock(&send_cq->lock);
				__acquire(&recv_cq->lock);
			} else {
				spin_lock(&recv_cq->lock);
				spin_lock_nested(&send_cq->lock,
						 SINGLE_DEPTH_NESTING);
			}
		} else {
			spin_lock(&send_cq->lock);
			__acquire(&recv_cq->lock);
		}
	} else if (recv_cq) {
		spin_lock(&recv_cq->lock);
		__acquire(&send_cq->lock);
	} else {
		__acquire(&send_cq->lock);
		__acquire(&recv_cq->lock);
	}
}

static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
	__releases(&send_cq->lock) __releases(&recv_cq->lock)
{
	if (send_cq) {
		if (recv_cq) {
			if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
				spin_unlock(&recv_cq->lock);
				spin_unlock(&send_cq->lock);
			} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
				__release(&recv_cq->lock);
				spin_unlock(&send_cq->lock);
			} else {
				spin_unlock(&send_cq->lock);
				spin_unlock(&recv_cq->lock);
			}
		} else {
			__release(&recv_cq->lock);
			spin_unlock(&send_cq->lock);
		}
	} else if (recv_cq) {
		__release(&send_cq->lock);
		spin_unlock(&recv_cq->lock);
	} else {
		__release(&recv_cq->lock);
		__release(&send_cq->lock);
	}
}

static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp)
{
	return to_mpd(qp->ibqp.pd);
}

static void get_cqs(enum ib_qp_type qp_type,
		    struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
		    struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
{
	switch (qp_type) {
	case IB_QPT_XRC_TGT:
		*send_cq = NULL;
		*recv_cq = NULL;
		break;
	case MLX5_IB_QPT_REG_UMR:
	case IB_QPT_XRC_INI:
		*send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
		*recv_cq = NULL;
		break;

	case IB_QPT_SMI:
	case MLX5_IB_QPT_HW_GSI:
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	case IB_QPT_RAW_PACKET:
		*send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
		*recv_cq = ib_recv_cq ? to_mcq(ib_recv_cq) : NULL;
		break;
	default:
		*send_cq = NULL;
		*recv_cq = NULL;
		break;
	}
}

static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
				const struct mlx5_modify_raw_qp_param *raw_qp_param,
				u8 lag_tx_affinity);

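/*
 * destroy_qp_common() below tears a QP down in three steps: move it to RESET
 * in firmware (unless it is already there), unlink it from the device and CQ
 * lists under the reset-flow lock and both CQ locks, and only then destroy
 * the hardware object and release its buffers.
 */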
static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
			      struct ib_udata *udata)
{
	struct mlx5_ib_cq *send_cq, *recv_cq;
	struct mlx5_ib_qp_base *base;
	unsigned long flags;
	int err;

	if (qp->ibqp.rwq_ind_tbl) {
		destroy_rss_raw_qp_tir(dev, qp);
		return;
	}

	base = (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
		qp->flags & IB_QP_CREATE_SOURCE_QPN) ?
		       &qp->raw_packet_qp.rq.base :
		       &qp->trans_qp.base;

	if (qp->state != IB_QPS_RESET) {
		if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET &&
		    !(qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
			err = mlx5_core_qp_modify(dev, MLX5_CMD_OP_2RST_QP, 0,
						  NULL, &base->mqp);
		} else {
			struct mlx5_modify_raw_qp_param raw_qp_param = {
				.operation = MLX5_CMD_OP_2RST_QP
			};

			err = modify_raw_packet_qp(dev, qp, &raw_qp_param, 0);
		}
		if (err)
			mlx5_ib_warn(dev, "mlx5_ib: modify QP 0x%06x to RESET failed\n",
				     base->mqp.qpn);
	}

	get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
		&send_cq, &recv_cq);

	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
	mlx5_ib_lock_cqs(send_cq, recv_cq);
	/* del from lists under both locks above to protect reset flow paths */
	list_del(&qp->qps_list);
	if (send_cq)
		list_del(&qp->cq_send_list);

	if (recv_cq)
		list_del(&qp->cq_recv_list);

	if (!udata) {
		__mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
				   qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (send_cq != recv_cq)
			__mlx5_ib_cq_clean(send_cq, base->mqp.qpn, NULL);
	}
	mlx5_ib_unlock_cqs(send_cq, recv_cq);
	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);

	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
	    qp->flags & IB_QP_CREATE_SOURCE_QPN) {
		destroy_raw_packet_qp(dev, qp);
	} else {
		err = mlx5_core_destroy_qp(dev, &base->mqp);
		if (err)
			mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n",
				     base->mqp.qpn);
	}

	destroy_qp(dev, qp, base, udata);
}

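/*
 * For a DCT, create_dct() only allocates and pre-fills the create_dct_in
 * mailbox; the firmware object itself is created later, when the QP is
 * modified from INIT to RTR and the remaining address attributes are known.
 */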
static int create_dct(struct ib_pd *pd, struct mlx5_ib_qp *qp,
		      struct mlx5_create_qp_params *params)
{
	struct ib_qp_init_attr *attr = params->attr;
	struct mlx5_ib_create_qp *ucmd = params->ucmd;
	u32 uidx = params->uidx;
	void *dctc;

	qp->dct.in = kzalloc(MLX5_ST_SZ_BYTES(create_dct_in), GFP_KERNEL);
	if (!qp->dct.in)
		return -ENOMEM;

	MLX5_SET(create_dct_in, qp->dct.in, uid, to_mpd(pd)->uid);
	dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
	MLX5_SET(dctc, dctc, pd, to_mpd(pd)->pdn);
	MLX5_SET(dctc, dctc, srqn_xrqn, to_msrq(attr->srq)->msrq.srqn);
	MLX5_SET(dctc, dctc, cqn, to_mcq(attr->recv_cq)->mcq.cqn);
	MLX5_SET64(dctc, dctc, dc_access_key, ucmd->access_key);
	MLX5_SET(dctc, dctc, user_index, uidx);

	if (qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) {
		int rcqe_sz = mlx5_ib_get_cqe_size(attr->recv_cq);

		if (rcqe_sz == 128)
			MLX5_SET(dctc, dctc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
	}

	qp->state = IB_QPS_RESET;

	return 0;
}

static int check_qp_type(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
			 enum ib_qp_type *type)
{
	if (attr->qp_type == IB_QPT_DRIVER && !MLX5_CAP_GEN(dev->mdev, dct))
		goto out;

	switch (attr->qp_type) {
	case IB_QPT_XRC_TGT:
	case IB_QPT_XRC_INI:
		if (!MLX5_CAP_GEN(dev->mdev, xrc))
			goto out;
		fallthrough;
	case IB_QPT_RAW_PACKET:
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	case IB_QPT_SMI:
	case MLX5_IB_QPT_HW_GSI:
	case MLX5_IB_QPT_REG_UMR:
	case IB_QPT_DRIVER:
	case IB_QPT_GSI:
		break;
	default:
		goto out;
	}

	*type = attr->qp_type;
	return 0;

out:
	mlx5_ib_dbg(dev, "Unsupported QP type %d\n", attr->qp_type);
	return -EOPNOTSUPP;
}

static int check_valid_flow(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			    struct ib_qp_init_attr *attr,
			    struct ib_udata *udata)
{
	struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);

	if (!udata) {
		/* Kernel create_qp callers */
		if (attr->rwq_ind_tbl)
			return -EOPNOTSUPP;

		switch (attr->qp_type) {
		case IB_QPT_RAW_PACKET:
		case IB_QPT_DRIVER:
			return -EOPNOTSUPP;
		default:
			return 0;
		}
	}

	/* Userspace create_qp callers */
	if (attr->qp_type == IB_QPT_RAW_PACKET && !ucontext->cqe_version) {
		mlx5_ib_dbg(dev,
			    "Raw Packet QP is only supported for CQE version > 0\n");
		return -EINVAL;
	}

	if (attr->qp_type != IB_QPT_RAW_PACKET && attr->rwq_ind_tbl) {
		mlx5_ib_dbg(dev,
			    "Wrong QP type %d for the RWQ indirect table\n",
			    attr->qp_type);
		return -EINVAL;
	}

	switch (attr->qp_type) {
	case IB_QPT_SMI:
	case MLX5_IB_QPT_HW_GSI:
	case MLX5_IB_QPT_REG_UMR:
	case IB_QPT_GSI:
		mlx5_ib_dbg(dev, "Kernel doesn't support QP type %d\n",
			    attr->qp_type);
		return -EOPNOTSUPP;
	default:
		break;
	}

	/*
	 * We don't need to see this warning, it means that kernel code
	 * missing ib_pd. Placed here to catch developer's mistakes.
	 */
	WARN_ONCE(!pd && attr->qp_type != IB_QPT_XRC_TGT,
		  "There is a missing PD pointer assignment\n");
	return 0;
}

static void process_vendor_flag(struct mlx5_ib_dev *dev, int *flags, int flag,
				bool cond, struct mlx5_ib_qp *qp)
{
	if (!(*flags & flag))
		return;

	if (cond) {
		qp->flags_en |= flag;
		*flags &= ~flag;
		return;
	}

	if (flag == MLX5_QP_FLAG_SCATTER_CQE) {
		/*
		 * We don't return error if this flag was provided,
		 * and mlx5 doesn't have right capability.
		 */
		*flags &= ~MLX5_QP_FLAG_SCATTER_CQE;
		return;
	}
	mlx5_ib_dbg(dev, "Vendor create QP flag 0x%X is not supported\n", flag);
}

static int process_vendor_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
				void *ucmd, struct ib_qp_init_attr *attr)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	bool cond;
	int flags;

	if (attr->rwq_ind_tbl)
		flags = ((struct mlx5_ib_create_qp_rss *)ucmd)->flags;
	else
		flags = ((struct mlx5_ib_create_qp *)ucmd)->flags;

	switch (flags & (MLX5_QP_FLAG_TYPE_DCT | MLX5_QP_FLAG_TYPE_DCI)) {
	case MLX5_QP_FLAG_TYPE_DCI:
		qp->type = MLX5_IB_QPT_DCI;
		break;
	case MLX5_QP_FLAG_TYPE_DCT:
		qp->type = MLX5_IB_QPT_DCT;
		break;
	default:
		if (qp->type != IB_QPT_DRIVER)
			break;
		/*
		 * It is IB_QPT_DRIVER and or no subtype or
		 * wrong subtype were provided.
		 */
		return -EINVAL;
	}

	process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TYPE_DCI, true, qp);
	process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TYPE_DCT, true, qp);

	process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SIGNATURE, true, qp);
	process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SCATTER_CQE,
			    MLX5_CAP_GEN(mdev, sctr_data_cqe), qp);

	if (qp->type == IB_QPT_RAW_PACKET) {
		cond = MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) ||
		       MLX5_CAP_ETH(mdev, tunnel_stateless_gre) ||
		       MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx);
		process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TUNNEL_OFFLOADS,
				    cond, qp);
		process_vendor_flag(dev, &flags,
				    MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC, true,
				    qp);
		process_vendor_flag(dev, &flags,
				    MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC, true,
				    qp);
	}

	if (qp->type == IB_QPT_RC)
		process_vendor_flag(dev, &flags,
				    MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE,
				    MLX5_CAP_GEN(mdev, qp_packet_based), qp);

	process_vendor_flag(dev, &flags, MLX5_QP_FLAG_BFREG_INDEX, true, qp);
	process_vendor_flag(dev, &flags, MLX5_QP_FLAG_UAR_PAGE_INDEX, true, qp);

	cond = qp->flags_en & ~(MLX5_QP_FLAG_TUNNEL_OFFLOADS |
				MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
				MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC);
	if (attr->rwq_ind_tbl && cond) {
		mlx5_ib_dbg(dev, "RSS RAW QP has unsupported flags 0x%X\n",
			    cond);
		return -EINVAL;
	}

	if (flags)
		mlx5_ib_dbg(dev, "udata has unsupported flags 0x%X\n", flags);

	return (flags) ? -EINVAL : 0;
}

static void process_create_flag(struct mlx5_ib_dev *dev, int *flags, int flag,
				bool cond, struct mlx5_ib_qp *qp)
{
	if (!(*flags & flag))
		return;

	if (cond) {
		qp->flags |= flag;
		*flags &= ~flag;
		return;
	}

	if (flag == MLX5_IB_QP_CREATE_WC_TEST) {
		/*
		 * Special case, if condition didn't meet, it won't be error,
		 * just different in-kernel flow.
		 */
		*flags &= ~MLX5_IB_QP_CREATE_WC_TEST;
		return;
	}
	mlx5_ib_dbg(dev, "Verbs create QP flag 0x%X is not supported\n", flag);
}

static int process_create_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
				struct ib_qp_init_attr *attr)
{
	enum ib_qp_type qp_type = qp->type;
	struct mlx5_core_dev *mdev = dev->mdev;
	int create_flags = attr->create_flags;
	bool cond;

	if (qp_type == MLX5_IB_QPT_DCT)
		return (create_flags) ? -EINVAL : 0;

	if (qp_type == IB_QPT_RAW_PACKET && attr->rwq_ind_tbl)
		return (create_flags) ? -EINVAL : 0;

	process_create_flag(dev, &create_flags,
			    IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
			    MLX5_CAP_GEN(mdev, block_lb_mc), qp);
	process_create_flag(dev, &create_flags, IB_QP_CREATE_CROSS_CHANNEL,
			    MLX5_CAP_GEN(mdev, cd), qp);
	process_create_flag(dev, &create_flags, IB_QP_CREATE_MANAGED_SEND,
			    MLX5_CAP_GEN(mdev, cd), qp);
	process_create_flag(dev, &create_flags, IB_QP_CREATE_MANAGED_RECV,
			    MLX5_CAP_GEN(mdev, cd), qp);

	if (qp_type == IB_QPT_UD) {
		process_create_flag(dev, &create_flags,
				    IB_QP_CREATE_IPOIB_UD_LSO,
				    MLX5_CAP_GEN(mdev, ipoib_basic_offloads),
				    qp);
		cond = MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_IB;
		process_create_flag(dev, &create_flags, IB_QP_CREATE_SOURCE_QPN,
				    cond, qp);
	}

	if (qp_type == IB_QPT_RAW_PACKET) {
		cond = MLX5_CAP_GEN(mdev, eth_net_offloads) &&
		       MLX5_CAP_ETH(mdev, scatter_fcs);
		process_create_flag(dev, &create_flags,
				    IB_QP_CREATE_SCATTER_FCS, cond, qp);

		cond = MLX5_CAP_GEN(mdev, eth_net_offloads) &&
		       MLX5_CAP_ETH(mdev, vlan_cap);
		process_create_flag(dev, &create_flags,
				    IB_QP_CREATE_CVLAN_STRIPPING, cond, qp);
	}

	process_create_flag(dev, &create_flags,
			    IB_QP_CREATE_PCI_WRITE_END_PADDING,
			    MLX5_CAP_GEN(mdev, end_pad), qp);

	process_create_flag(dev, &create_flags, MLX5_IB_QP_CREATE_WC_TEST,
			    qp_type != MLX5_IB_QPT_REG_UMR, qp);
	process_create_flag(dev, &create_flags, MLX5_IB_QP_CREATE_SQPN_QP1,
			    true, qp);

	if (create_flags)
		mlx5_ib_dbg(dev, "Create QP has unsupported flags 0x%X\n",
			    create_flags);

	return (create_flags) ? -EINVAL : 0;
}

static size_t process_udata_size(struct ib_qp_init_attr *attr,
				 struct ib_udata *udata)
{
	size_t ucmd = sizeof(struct mlx5_ib_create_qp);
	size_t inlen = udata->inlen;

	if (attr->qp_type == IB_QPT_DRIVER)
		return (inlen < ucmd) ? 0 : ucmd;

	if (!attr->rwq_ind_tbl)
		return ucmd;

	if (inlen < offsetofend(struct mlx5_ib_create_qp_rss, flags))
		return 0;

	ucmd = sizeof(struct mlx5_ib_create_qp_rss);
	if (inlen > ucmd && !ib_is_udata_cleared(udata, ucmd, inlen - ucmd))
		return 0;

	return min(ucmd, inlen);
}

static int create_raw_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			 struct mlx5_ib_qp *qp,
			 struct mlx5_create_qp_params *params)
{
	if (params->is_rss_raw)
		return create_rss_raw_qp_tir(dev, pd, qp, params);

	return create_user_qp(dev, pd, qp, params);
}

static int check_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
			 struct ib_qp_init_attr *attr)
{
	int ret = 0;

	switch (qp->type) {
	case MLX5_IB_QPT_DCT:
		ret = (!attr->srq || !attr->recv_cq) ? -EINVAL : 0;
		break;
	case MLX5_IB_QPT_DCI:
		ret = (attr->cap.max_recv_wr || attr->cap.max_recv_sge) ?
			      -EINVAL : 0;
		break;
	case IB_QPT_RAW_PACKET:
		ret = (attr->rwq_ind_tbl && attr->send_cq) ? -EINVAL : 0;
		break;
	default:
		break;
	}

	if (ret)
		mlx5_ib_dbg(dev, "QP type %d has wrong attributes\n", qp->type);

	return ret;
}

static int get_qp_uidx(struct mlx5_ib_qp *qp,
		       struct mlx5_create_qp_params *params)
{
	struct mlx5_ib_create_qp *ucmd = params->ucmd;
	struct ib_udata *udata = params->udata;
	struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);

	if (params->is_rss_raw)
		return 0;

	return get_qp_user_index(ucontext, ucmd, sizeof(*ucmd), &params->uidx);
}

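/*
 * mlx5_ib_create_qp() is the common entry point: it validates the QP type
 * and the caller context, copies and parses the user command, applies the
 * vendor and verbs create flags, and then dispatches to the raw packet,
 * DCT, XRC target, user or kernel creation path.
 */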
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
				struct ib_udata *udata)
{
	struct mlx5_create_qp_params params = {};
	struct mlx5_ib_dev *dev;
	struct mlx5_ib_qp *qp;
	enum ib_qp_type type;
	u32 xrcdn = 0;
	int err;

	dev = pd ? to_mdev(pd->device) :
		   to_mdev(to_mxrcd(attr->xrcd)->ibxrcd.device);

	err = check_qp_type(dev, attr, &type);
	if (err)
		return ERR_PTR(err);

	err = check_valid_flow(dev, pd, attr, udata);
	if (err)
		return ERR_PTR(err);

	if (attr->qp_type == IB_QPT_GSI)
		return mlx5_ib_gsi_create_qp(pd, attr);

	params.udata = udata;
	params.uidx = MLX5_IB_DEFAULT_UIDX;
	params.attr = attr;
	params.is_rss_raw = !!attr->rwq_ind_tbl;

	if (udata) {
		params.inlen = process_udata_size(attr, udata);
		if (!params.inlen)
			return ERR_PTR(-EINVAL);

		params.ucmd = kzalloc(params.inlen, GFP_KERNEL);
		if (!params.ucmd)
			return ERR_PTR(-ENOMEM);

		err = ib_copy_from_udata(params.ucmd, udata, params.inlen);
		if (err)
			goto free_ucmd;
	}

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		err = -ENOMEM;
		goto free_ucmd;
	}

	qp->type = type;
	if (udata) {
		err = process_vendor_flags(dev, qp, params.ucmd, attr);
		if (err)
			goto free_qp;

		err = get_qp_uidx(qp, &params);
		if (err)
			goto free_qp;
	}
	err = process_create_flags(dev, qp, attr);
	if (err)
		goto free_qp;

	err = check_qp_attr(dev, qp, attr);
	if (err)
		goto free_qp;

	switch (qp->type) {
	case IB_QPT_RAW_PACKET:
		err = create_raw_qp(dev, pd, qp, &params);
		break;
	case MLX5_IB_QPT_DCT:
		err = create_dct(pd, qp, &params);
		break;
	case IB_QPT_XRC_TGT:
		xrcdn = to_mxrcd(attr->xrcd)->xrcdn;
		err = create_xrc_tgt_qp(dev, qp, &params);
		break;
	default:
		if (udata)
			err = create_user_qp(dev, pd, qp, &params);
		else
			err = create_kernel_qp(dev, pd, qp, &params);
	}
	if (err) {
		mlx5_ib_err(dev, "create_qp failed %d\n", err);
		goto free_qp;
	}

	kfree(params.ucmd);
	params.ucmd = NULL;

	if (is_qp0(attr->qp_type))
		qp->ibqp.qp_num = 0;
	else if (is_qp1(attr->qp_type))
		qp->ibqp.qp_num = 1;
	else
		qp->ibqp.qp_num = qp->trans_qp.base.mqp.qpn;

	qp->trans_qp.xrcdn = xrcdn;

	return &qp->ibqp;

free_qp:
	kfree(qp);
free_ucmd:
	kfree(params.ucmd);
	return ERR_PTR(err);
}

static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
{
	struct mlx5_ib_dev *dev = to_mdev(mqp->ibqp.device);

	if (mqp->state == IB_QPS_RTR) {
		int err;

		err = mlx5_core_destroy_dct(dev, &mqp->dct.mdct);
		if (err) {
			mlx5_ib_warn(dev, "failed to destroy DCT %d\n", err);
			return err;
		}
	}

	kfree(mqp->dct.in);
	kfree(mqp);
	return 0;
}

int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_ib_qp *mqp = to_mqp(qp);

	if (unlikely(qp->qp_type == IB_QPT_GSI))
		return mlx5_ib_gsi_destroy_qp(qp);

	if (mqp->type == MLX5_IB_QPT_DCT)
		return mlx5_ib_destroy_dct(mqp);

	destroy_qp_common(dev, mqp, udata);

	kfree(mqp);

	return 0;
}

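/*
 * to_mlx5_access_flags() translates the IB access flags into the RRE/RWE/RAE
 * bits of the QP context. When the destination advertises no responder
 * resources, everything except remote write is masked off; remote atomics
 * additionally select a device atomic mode.
 */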
static int to_mlx5_access_flags(struct mlx5_ib_qp *qp,
				const struct ib_qp_attr *attr,
				int attr_mask, __be32 *hw_access_flags_be)
{
	u8 dest_rd_atomic;
	u32 access_flags, hw_access_flags = 0;

	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->trans_qp.resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->trans_qp.atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MLX5_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC) {
		int atomic_mode;

		atomic_mode = get_atomic_mode(dev, qp->ibqp.qp_type);
		if (atomic_mode < 0)
			return -EOPNOTSUPP;

		hw_access_flags |= MLX5_QP_BIT_RAE;
		hw_access_flags |= atomic_mode << MLX5_ATOMIC_MODE_OFFSET;
	}

	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MLX5_QP_BIT_RWE;

	*hw_access_flags_be = cpu_to_be32(hw_access_flags);

	return 0;
}

enum {
	MLX5_PATH_FLAG_FL	= 1 << 0,
	MLX5_PATH_FLAG_FREE_AR	= 1 << 1,
	MLX5_PATH_FLAG_COUNTER	= 1 << 2,
};

static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
{
	if (rate == IB_RATE_PORT_CURRENT)
		return 0;

	if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_600_GBPS)
		return -EINVAL;

	while (rate != IB_RATE_PORT_CURRENT &&
	       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
		 MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
		--rate;

	return rate ? rate + MLX5_STAT_RATE_OFFSET : rate;
}

3067 static int modify_raw_packet_eth_prio(struct mlx5_core_dev
*dev
,
3068 struct mlx5_ib_sq
*sq
, u8 sl
,
3076 inlen
= MLX5_ST_SZ_BYTES(modify_tis_in
);
3077 in
= kvzalloc(inlen
, GFP_KERNEL
);
3081 MLX5_SET(modify_tis_in
, in
, bitmask
.prio
, 1);
3082 MLX5_SET(modify_tis_in
, in
, uid
, to_mpd(pd
)->uid
);
3084 tisc
= MLX5_ADDR_OF(modify_tis_in
, in
, ctx
);
3085 MLX5_SET(tisc
, tisc
, prio
, ((sl
& 0x7) << 1));
3087 err
= mlx5_core_modify_tis(dev
, sq
->tisn
, in
);
3094 static int modify_raw_packet_tx_affinity(struct mlx5_core_dev
*dev
,
3095 struct mlx5_ib_sq
*sq
, u8 tx_affinity
,
3103 inlen
= MLX5_ST_SZ_BYTES(modify_tis_in
);
3104 in
= kvzalloc(inlen
, GFP_KERNEL
);
3108 MLX5_SET(modify_tis_in
, in
, bitmask
.lag_tx_port_affinity
, 1);
3109 MLX5_SET(modify_tis_in
, in
, uid
, to_mpd(pd
)->uid
);
3111 tisc
= MLX5_ADDR_OF(modify_tis_in
, in
, ctx
);
3112 MLX5_SET(tisc
, tisc
, lag_tx_port_affinity
, tx_affinity
);
3114 err
= mlx5_core_modify_tis(dev
, sq
->tisn
, in
);
3121 static int mlx5_set_path(struct mlx5_ib_dev
*dev
, struct mlx5_ib_qp
*qp
,
3122 const struct rdma_ah_attr
*ah
,
3123 struct mlx5_qp_path
*path
, u8 port
, int attr_mask
,
3124 u32 path_flags
, const struct ib_qp_attr
*attr
,
3127 const struct ib_global_route
*grh
= rdma_ah_read_grh(ah
);
3129 enum ib_gid_type gid_type
;
3130 u8 ah_flags
= rdma_ah_get_ah_flags(ah
);
3131 u8 sl
= rdma_ah_get_sl(ah
);
3133 if (attr_mask
& IB_QP_PKEY_INDEX
)
3134 path
->pkey_index
= cpu_to_be16(alt
? attr
->alt_pkey_index
:
3137 if (ah_flags
& IB_AH_GRH
) {
3138 if (grh
->sgid_index
>=
3139 dev
->mdev
->port_caps
[port
- 1].gid_table_len
) {
3140 pr_err("sgid_index (%u) too large. max is %d\n",
3142 dev
->mdev
->port_caps
[port
- 1].gid_table_len
);
3147 if (ah
->type
== RDMA_AH_ATTR_TYPE_ROCE
) {
3148 if (!(ah_flags
& IB_AH_GRH
))
3151 memcpy(path
->rmac
, ah
->roce
.dmac
, sizeof(ah
->roce
.dmac
));
3152 if (qp
->ibqp
.qp_type
== IB_QPT_RC
||
3153 qp
->ibqp
.qp_type
== IB_QPT_UC
||
3154 qp
->ibqp
.qp_type
== IB_QPT_XRC_INI
||
3155 qp
->ibqp
.qp_type
== IB_QPT_XRC_TGT
)
3157 mlx5_get_roce_udp_sport(dev
, ah
->grh
.sgid_attr
);
3158 path
->dci_cfi_prio_sl
= (sl
& 0x7) << 4;
3159 gid_type
= ah
->grh
.sgid_attr
->gid_type
;
3160 if (gid_type
== IB_GID_TYPE_ROCE_UDP_ENCAP
)
3161 path
->ecn_dscp
= (grh
->traffic_class
>> 2) & 0x3f;
3163 path
->fl_free_ar
= (path_flags
& MLX5_PATH_FLAG_FL
) ? 0x80 : 0;
3165 (path_flags
& MLX5_PATH_FLAG_FREE_AR
) ? 0x40 : 0;
3166 path
->rlid
= cpu_to_be16(rdma_ah_get_dlid(ah
));
3167 path
->grh_mlid
= rdma_ah_get_path_bits(ah
) & 0x7f;
3168 if (ah_flags
& IB_AH_GRH
)
3169 path
->grh_mlid
|= 1 << 7;
3170 path
->dci_cfi_prio_sl
= sl
& 0xf;
3173 if (ah_flags
& IB_AH_GRH
) {
3174 path
->mgid_index
= grh
->sgid_index
;
3175 path
->hop_limit
= grh
->hop_limit
;
3176 path
->tclass_flowlabel
=
3177 cpu_to_be32((grh
->traffic_class
<< 20) |
3179 memcpy(path
->rgid
, grh
->dgid
.raw
, 16);
3182 err
= ib_rate_to_mlx5(dev
, rdma_ah_get_static_rate(ah
));
3185 path
->static_rate
= err
;
3188 if (attr_mask
& IB_QP_TIMEOUT
)
3189 path
->ackto_lt
= (alt
? attr
->alt_timeout
: attr
->timeout
) << 3;
3191 if ((qp
->ibqp
.qp_type
== IB_QPT_RAW_PACKET
) && qp
->sq
.wqe_cnt
)
3192 return modify_raw_packet_eth_prio(dev
->mdev
,
3193 &qp
->raw_packet_qp
.sq
,
3194 sl
& 0xf, qp
->ibqp
.pd
);
3199 static enum mlx5_qp_optpar opt_mask
[MLX5_QP_NUM_STATE
][MLX5_QP_NUM_STATE
][MLX5_QP_ST_MAX
] = {
3200 [MLX5_QP_STATE_INIT
] = {
3201 [MLX5_QP_STATE_INIT
] = {
3202 [MLX5_QP_ST_RC
] = MLX5_QP_OPTPAR_RRE
|
3203 MLX5_QP_OPTPAR_RAE
|
3204 MLX5_QP_OPTPAR_RWE
|
3205 MLX5_QP_OPTPAR_PKEY_INDEX
|
3206 MLX5_QP_OPTPAR_PRI_PORT
,
3207 [MLX5_QP_ST_UC
] = MLX5_QP_OPTPAR_RWE
|
3208 MLX5_QP_OPTPAR_PKEY_INDEX
|
3209 MLX5_QP_OPTPAR_PRI_PORT
,
3210 [MLX5_QP_ST_UD
] = MLX5_QP_OPTPAR_PKEY_INDEX
|
3211 MLX5_QP_OPTPAR_Q_KEY
|
3212 MLX5_QP_OPTPAR_PRI_PORT
,
3213 [MLX5_QP_ST_XRC
] = MLX5_QP_OPTPAR_RRE
|
3214 MLX5_QP_OPTPAR_RAE
|
3215 MLX5_QP_OPTPAR_RWE
|
3216 MLX5_QP_OPTPAR_PKEY_INDEX
|
3217 MLX5_QP_OPTPAR_PRI_PORT
,
3219 [MLX5_QP_STATE_RTR
] = {
3220 [MLX5_QP_ST_RC
] = MLX5_QP_OPTPAR_ALT_ADDR_PATH
|
3221 MLX5_QP_OPTPAR_RRE
|
3222 MLX5_QP_OPTPAR_RAE
|
3223 MLX5_QP_OPTPAR_RWE
|
3224 MLX5_QP_OPTPAR_PKEY_INDEX
,
3225 [MLX5_QP_ST_UC
] = MLX5_QP_OPTPAR_ALT_ADDR_PATH
|
3226 MLX5_QP_OPTPAR_RWE
|
3227 MLX5_QP_OPTPAR_PKEY_INDEX
,
3228 [MLX5_QP_ST_UD
] = MLX5_QP_OPTPAR_PKEY_INDEX
|
3229 MLX5_QP_OPTPAR_Q_KEY
,
3230 [MLX5_QP_ST_MLX
] = MLX5_QP_OPTPAR_PKEY_INDEX
|
3231 MLX5_QP_OPTPAR_Q_KEY
,
3232 [MLX5_QP_ST_XRC
] = MLX5_QP_OPTPAR_ALT_ADDR_PATH
|
3233 MLX5_QP_OPTPAR_RRE
|
3234 MLX5_QP_OPTPAR_RAE
|
3235 MLX5_QP_OPTPAR_RWE
|
3236 MLX5_QP_OPTPAR_PKEY_INDEX
,
3239 [MLX5_QP_STATE_RTR
] = {
3240 [MLX5_QP_STATE_RTS
] = {
3241 [MLX5_QP_ST_RC
] = MLX5_QP_OPTPAR_ALT_ADDR_PATH
|
3242 MLX5_QP_OPTPAR_RRE
|
3243 MLX5_QP_OPTPAR_RAE
|
3244 MLX5_QP_OPTPAR_RWE
|
3245 MLX5_QP_OPTPAR_PM_STATE
|
3246 MLX5_QP_OPTPAR_RNR_TIMEOUT
,
3247 [MLX5_QP_ST_UC
] = MLX5_QP_OPTPAR_ALT_ADDR_PATH
|
3248 MLX5_QP_OPTPAR_RWE
|
3249 MLX5_QP_OPTPAR_PM_STATE
,
3250 [MLX5_QP_ST_UD
] = MLX5_QP_OPTPAR_Q_KEY
,
3251 [MLX5_QP_ST_XRC
] = MLX5_QP_OPTPAR_ALT_ADDR_PATH
|
3252 MLX5_QP_OPTPAR_RRE
|
3253 MLX5_QP_OPTPAR_RAE
|
3254 MLX5_QP_OPTPAR_RWE
|
3255 MLX5_QP_OPTPAR_PM_STATE
|
3256 MLX5_QP_OPTPAR_RNR_TIMEOUT
,
3259 [MLX5_QP_STATE_RTS
] = {
3260 [MLX5_QP_STATE_RTS
] = {
3261 [MLX5_QP_ST_RC
] = MLX5_QP_OPTPAR_RRE
|
3262 MLX5_QP_OPTPAR_RAE
|
3263 MLX5_QP_OPTPAR_RWE
|
3264 MLX5_QP_OPTPAR_RNR_TIMEOUT
|
3265 MLX5_QP_OPTPAR_PM_STATE
|
3266 MLX5_QP_OPTPAR_ALT_ADDR_PATH
,
3267 [MLX5_QP_ST_UC
] = MLX5_QP_OPTPAR_RWE
|
3268 MLX5_QP_OPTPAR_PM_STATE
|
3269 MLX5_QP_OPTPAR_ALT_ADDR_PATH
,
3270 [MLX5_QP_ST_UD
] = MLX5_QP_OPTPAR_Q_KEY
|
3271 MLX5_QP_OPTPAR_SRQN
|
3272 MLX5_QP_OPTPAR_CQN_RCV
,
3273 [MLX5_QP_ST_XRC
] = MLX5_QP_OPTPAR_RRE
|
3274 MLX5_QP_OPTPAR_RAE
|
3275 MLX5_QP_OPTPAR_RWE
|
3276 MLX5_QP_OPTPAR_RNR_TIMEOUT
|
3277 MLX5_QP_OPTPAR_PM_STATE
|
3278 MLX5_QP_OPTPAR_ALT_ADDR_PATH
,
3281 [MLX5_QP_STATE_SQER
] = {
3282 [MLX5_QP_STATE_RTS
] = {
3283 [MLX5_QP_ST_UD
] = MLX5_QP_OPTPAR_Q_KEY
,
3284 [MLX5_QP_ST_MLX
] = MLX5_QP_OPTPAR_Q_KEY
,
3285 [MLX5_QP_ST_UC
] = MLX5_QP_OPTPAR_RWE
,
3286 [MLX5_QP_ST_RC
] = MLX5_QP_OPTPAR_RNR_TIMEOUT
|
3287 MLX5_QP_OPTPAR_RWE
|
3288 MLX5_QP_OPTPAR_RAE
|
3290 [MLX5_QP_ST_XRC
] = MLX5_QP_OPTPAR_RNR_TIMEOUT
|
3291 MLX5_QP_OPTPAR_RWE
|
3292 MLX5_QP_OPTPAR_RAE
|
3298 static int ib_nr_to_mlx5_nr(int ib_mask
)
3303 case IB_QP_CUR_STATE
:
3305 case IB_QP_EN_SQD_ASYNC_NOTIFY
:
3307 case IB_QP_ACCESS_FLAGS
:
3308 return MLX5_QP_OPTPAR_RWE
| MLX5_QP_OPTPAR_RRE
|
3310 case IB_QP_PKEY_INDEX
:
3311 return MLX5_QP_OPTPAR_PKEY_INDEX
;
3313 return MLX5_QP_OPTPAR_PRI_PORT
;
3315 return MLX5_QP_OPTPAR_Q_KEY
;
3317 return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH
|
3318 MLX5_QP_OPTPAR_PRI_PORT
;
3319 case IB_QP_PATH_MTU
:
3322 return MLX5_QP_OPTPAR_ACK_TIMEOUT
;
3323 case IB_QP_RETRY_CNT
:
3324 return MLX5_QP_OPTPAR_RETRY_COUNT
;
3325 case IB_QP_RNR_RETRY
:
3326 return MLX5_QP_OPTPAR_RNR_RETRY
;
3329 case IB_QP_MAX_QP_RD_ATOMIC
:
3330 return MLX5_QP_OPTPAR_SRA_MAX
;
3331 case IB_QP_ALT_PATH
:
3332 return MLX5_QP_OPTPAR_ALT_ADDR_PATH
;
3333 case IB_QP_MIN_RNR_TIMER
:
3334 return MLX5_QP_OPTPAR_RNR_TIMEOUT
;
3337 case IB_QP_MAX_DEST_RD_ATOMIC
:
3338 return MLX5_QP_OPTPAR_RRA_MAX
| MLX5_QP_OPTPAR_RWE
|
3339 MLX5_QP_OPTPAR_RRE
| MLX5_QP_OPTPAR_RAE
;
3340 case IB_QP_PATH_MIG_STATE
:
3341 return MLX5_QP_OPTPAR_PM_STATE
;
3344 case IB_QP_DEST_QPN
:
3350 static int ib_mask_to_mlx5_opt(int ib_mask
)
3355 for (i
= 0; i
< 8 * sizeof(int); i
++) {
3356 if ((1 << i
) & ib_mask
)
3357 result
|= ib_nr_to_mlx5_nr(1 << i
);
3363 static int modify_raw_packet_qp_rq(
3364 struct mlx5_ib_dev
*dev
, struct mlx5_ib_rq
*rq
, int new_state
,
3365 const struct mlx5_modify_raw_qp_param
*raw_qp_param
, struct ib_pd
*pd
)
3372 inlen
= MLX5_ST_SZ_BYTES(modify_rq_in
);
3373 in
= kvzalloc(inlen
, GFP_KERNEL
);
3377 MLX5_SET(modify_rq_in
, in
, rq_state
, rq
->state
);
3378 MLX5_SET(modify_rq_in
, in
, uid
, to_mpd(pd
)->uid
);
3380 rqc
= MLX5_ADDR_OF(modify_rq_in
, in
, ctx
);
3381 MLX5_SET(rqc
, rqc
, state
, new_state
);
3383 if (raw_qp_param
->set_mask
& MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID
) {
3384 if (MLX5_CAP_GEN(dev
->mdev
, modify_rq_counter_set_id
)) {
3385 MLX5_SET64(modify_rq_in
, in
, modify_bitmask
,
3386 MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID
);
3387 MLX5_SET(rqc
, rqc
, counter_set_id
, raw_qp_param
->rq_q_ctr_id
);
3391 "RAW PACKET QP counters are not supported on current FW\n");
3394 err
= mlx5_core_modify_rq(dev
->mdev
, rq
->base
.mqp
.qpn
, in
);
3398 rq
->state
= new_state
;
3405 static int modify_raw_packet_qp_sq(
3406 struct mlx5_core_dev
*dev
, struct mlx5_ib_sq
*sq
, int new_state
,
3407 const struct mlx5_modify_raw_qp_param
*raw_qp_param
, struct ib_pd
*pd
)
3409 struct mlx5_ib_qp
*ibqp
= sq
->base
.container_mibqp
;
3410 struct mlx5_rate_limit old_rl
= ibqp
->rl
;
3411 struct mlx5_rate_limit new_rl
= old_rl
;
3412 bool new_rate_added
= false;
3419 inlen
= MLX5_ST_SZ_BYTES(modify_sq_in
);
3420 in
= kvzalloc(inlen
, GFP_KERNEL
);
3424 MLX5_SET(modify_sq_in
, in
, uid
, to_mpd(pd
)->uid
);
3425 MLX5_SET(modify_sq_in
, in
, sq_state
, sq
->state
);
3427 sqc
= MLX5_ADDR_OF(modify_sq_in
, in
, ctx
);
3428 MLX5_SET(sqc
, sqc
, state
, new_state
);
3430 if (raw_qp_param
->set_mask
& MLX5_RAW_QP_RATE_LIMIT
) {
3431 if (new_state
!= MLX5_SQC_STATE_RDY
)
3432 pr_warn("%s: Rate limit can only be changed when SQ is moving to RDY\n",
3435 new_rl
= raw_qp_param
->rl
;
3438 if (!mlx5_rl_are_equal(&old_rl
, &new_rl
)) {
3440 err
= mlx5_rl_add_rate(dev
, &rl_index
, &new_rl
);
3442 pr_err("Failed configuring rate limit(err %d): \
3443 rate %u, max_burst_sz %u, typical_pkt_sz %u\n",
3444 err
, new_rl
.rate
, new_rl
.max_burst_sz
,
3445 new_rl
.typical_pkt_sz
);
3449 new_rate_added
= true;
3452 MLX5_SET64(modify_sq_in
, in
, modify_bitmask
, 1);
3453 /* index 0 means no limit */
3454 MLX5_SET(sqc
, sqc
, packet_pacing_rate_limit_index
, rl_index
);
3457 err
= mlx5_core_modify_sq(dev
, sq
->base
.mqp
.qpn
, in
);
3459 /* Remove new rate from table if failed */
3461 mlx5_rl_remove_rate(dev
, &new_rl
);
3465 /* Only remove the old rate after new rate was set */
3466 if ((old_rl
.rate
&& !mlx5_rl_are_equal(&old_rl
, &new_rl
)) ||
3467 (new_state
!= MLX5_SQC_STATE_RDY
)) {
3468 mlx5_rl_remove_rate(dev
, &old_rl
);
3469 if (new_state
!= MLX5_SQC_STATE_RDY
)
3470 memset(&new_rl
, 0, sizeof(new_rl
));
3474 sq
->state
= new_state
;
3481 static int modify_raw_packet_qp(struct mlx5_ib_dev
*dev
, struct mlx5_ib_qp
*qp
,
3482 const struct mlx5_modify_raw_qp_param
*raw_qp_param
,
3485 struct mlx5_ib_raw_packet_qp
*raw_packet_qp
= &qp
->raw_packet_qp
;
3486 struct mlx5_ib_rq
*rq
= &raw_packet_qp
->rq
;
3487 struct mlx5_ib_sq
*sq
= &raw_packet_qp
->sq
;
3488 int modify_rq
= !!qp
->rq
.wqe_cnt
;
3489 int modify_sq
= !!qp
->sq
.wqe_cnt
;
3494 switch (raw_qp_param
->operation
) {
3495 case MLX5_CMD_OP_RST2INIT_QP
:
3496 rq_state
= MLX5_RQC_STATE_RDY
;
3497 sq_state
= MLX5_SQC_STATE_RDY
;
3499 case MLX5_CMD_OP_2ERR_QP
:
3500 rq_state
= MLX5_RQC_STATE_ERR
;
3501 sq_state
= MLX5_SQC_STATE_ERR
;
3503 case MLX5_CMD_OP_2RST_QP
:
3504 rq_state
= MLX5_RQC_STATE_RST
;
3505 sq_state
= MLX5_SQC_STATE_RST
;
3507 case MLX5_CMD_OP_RTR2RTS_QP
:
3508 case MLX5_CMD_OP_RTS2RTS_QP
:
3509 if (raw_qp_param
->set_mask
==
3510 MLX5_RAW_QP_RATE_LIMIT
) {
3512 sq_state
= sq
->state
;
3514 return raw_qp_param
->set_mask
? -EINVAL
: 0;
3517 case MLX5_CMD_OP_INIT2INIT_QP
:
3518 case MLX5_CMD_OP_INIT2RTR_QP
:
3519 if (raw_qp_param
->set_mask
)
3529 err
= modify_raw_packet_qp_rq(dev
, rq
, rq_state
, raw_qp_param
,
3536 struct mlx5_flow_handle
*flow_rule
;
3539 err
= modify_raw_packet_tx_affinity(dev
->mdev
, sq
,
3546 flow_rule
= create_flow_rule_vport_sq(dev
, sq
,
3547 raw_qp_param
->port
);
3548 if (IS_ERR(flow_rule
))
3549 return PTR_ERR(flow_rule
);
3551 err
= modify_raw_packet_qp_sq(dev
->mdev
, sq
, sq_state
,
3552 raw_qp_param
, qp
->ibqp
.pd
);
3555 mlx5_del_flow_rules(flow_rule
);
3560 destroy_flow_rule_vport_sq(sq
);
3561 sq
->flow_rule
= flow_rule
;
3570 static unsigned int get_tx_affinity(struct mlx5_ib_dev
*dev
,
3571 struct mlx5_ib_pd
*pd
,
3572 struct mlx5_ib_qp_base
*qp_base
,
3573 u8 port_num
, struct ib_udata
*udata
)
3575 struct mlx5_ib_ucontext
*ucontext
= rdma_udata_to_drv_context(
3576 udata
, struct mlx5_ib_ucontext
, ibucontext
);
3577 unsigned int tx_port_affinity
;
3580 tx_port_affinity
= (unsigned int)atomic_add_return(
3581 1, &ucontext
->tx_port_affinity
) %
3584 mlx5_ib_dbg(dev
, "Set tx affinity 0x%x to qpn 0x%x ucontext %p\n",
3585 tx_port_affinity
, qp_base
->mqp
.qpn
, ucontext
);
3588 (unsigned int)atomic_add_return(
3589 1, &dev
->port
[port_num
].roce
.tx_port_affinity
) %
3592 mlx5_ib_dbg(dev
, "Set tx affinity 0x%x to qpn 0x%x\n",
3593 tx_port_affinity
, qp_base
->mqp
.qpn
);
3596 return tx_port_affinity
;
3599 static int __mlx5_ib_qp_set_counter(struct ib_qp
*qp
,
3600 struct rdma_counter
*counter
)
3602 struct mlx5_ib_dev
*dev
= to_mdev(qp
->device
);
3603 struct mlx5_ib_qp
*mqp
= to_mqp(qp
);
3604 struct mlx5_qp_context context
= {};
3605 struct mlx5_ib_qp_base
*base
;
3609 set_id
= counter
->id
;
3611 set_id
= mlx5_ib_get_counters_id(dev
, mqp
->port
- 1);
3613 base
= &mqp
->trans_qp
.base
;
3614 context
.qp_counter_set_usr_page
&= cpu_to_be32(0xffffff);
3615 context
.qp_counter_set_usr_page
|= cpu_to_be32(set_id
<< 24);
3616 return mlx5_core_qp_modify(dev
, MLX5_CMD_OP_RTS2RTS_QP
,
3617 MLX5_QP_OPTPAR_COUNTER_SET_ID
, &context
,
3621 static int __mlx5_ib_modify_qp(struct ib_qp
*ibqp
,
3622 const struct ib_qp_attr
*attr
, int attr_mask
,
3623 enum ib_qp_state cur_state
,
3624 enum ib_qp_state new_state
,
3625 const struct mlx5_ib_modify_qp
*ucmd
,
3626 struct ib_udata
*udata
)
3628 static const u16 optab
[MLX5_QP_NUM_STATE
][MLX5_QP_NUM_STATE
] = {
3629 [MLX5_QP_STATE_RST
] = {
3630 [MLX5_QP_STATE_RST
] = MLX5_CMD_OP_2RST_QP
,
3631 [MLX5_QP_STATE_ERR
] = MLX5_CMD_OP_2ERR_QP
,
3632 [MLX5_QP_STATE_INIT
] = MLX5_CMD_OP_RST2INIT_QP
,
3634 [MLX5_QP_STATE_INIT
] = {
3635 [MLX5_QP_STATE_RST
] = MLX5_CMD_OP_2RST_QP
,
3636 [MLX5_QP_STATE_ERR
] = MLX5_CMD_OP_2ERR_QP
,
3637 [MLX5_QP_STATE_INIT
] = MLX5_CMD_OP_INIT2INIT_QP
,
3638 [MLX5_QP_STATE_RTR
] = MLX5_CMD_OP_INIT2RTR_QP
,
3640 [MLX5_QP_STATE_RTR
] = {
3641 [MLX5_QP_STATE_RST
] = MLX5_CMD_OP_2RST_QP
,
3642 [MLX5_QP_STATE_ERR
] = MLX5_CMD_OP_2ERR_QP
,
3643 [MLX5_QP_STATE_RTS
] = MLX5_CMD_OP_RTR2RTS_QP
,
3645 [MLX5_QP_STATE_RTS
] = {
3646 [MLX5_QP_STATE_RST
] = MLX5_CMD_OP_2RST_QP
,
3647 [MLX5_QP_STATE_ERR
] = MLX5_CMD_OP_2ERR_QP
,
3648 [MLX5_QP_STATE_RTS
] = MLX5_CMD_OP_RTS2RTS_QP
,
3650 [MLX5_QP_STATE_SQD
] = {
3651 [MLX5_QP_STATE_RST
] = MLX5_CMD_OP_2RST_QP
,
3652 [MLX5_QP_STATE_ERR
] = MLX5_CMD_OP_2ERR_QP
,
3654 [MLX5_QP_STATE_SQER
] = {
3655 [MLX5_QP_STATE_RST
] = MLX5_CMD_OP_2RST_QP
,
3656 [MLX5_QP_STATE_ERR
] = MLX5_CMD_OP_2ERR_QP
,
3657 [MLX5_QP_STATE_RTS
] = MLX5_CMD_OP_SQERR2RTS_QP
,
3659 [MLX5_QP_STATE_ERR
] = {
3660 [MLX5_QP_STATE_RST
] = MLX5_CMD_OP_2RST_QP
,
3661 [MLX5_QP_STATE_ERR
] = MLX5_CMD_OP_2ERR_QP
,
3665 struct mlx5_ib_dev
*dev
= to_mdev(ibqp
->device
);
3666 struct mlx5_ib_qp
*qp
= to_mqp(ibqp
);
3667 struct mlx5_ib_qp_base
*base
= &qp
->trans_qp
.base
;
3668 struct mlx5_ib_cq
*send_cq
, *recv_cq
;
3669 struct mlx5_qp_context
*context
;
3670 struct mlx5_ib_pd
*pd
;
3671 enum mlx5_qp_state mlx5_cur
, mlx5_new
;
3672 enum mlx5_qp_optpar optpar
;
3679 mlx5_st
= to_mlx5_st(qp
->type
);
3683 context
= kzalloc(sizeof(*context
), GFP_KERNEL
);
3688 context
->flags
= cpu_to_be32(mlx5_st
<< 16);
3690 if (!(attr_mask
& IB_QP_PATH_MIG_STATE
)) {
3691 context
->flags
|= cpu_to_be32(MLX5_QP_PM_MIGRATED
<< 11);
3693 switch (attr
->path_mig_state
) {
3694 case IB_MIG_MIGRATED
:
3695 context
->flags
|= cpu_to_be32(MLX5_QP_PM_MIGRATED
<< 11);
3698 context
->flags
|= cpu_to_be32(MLX5_QP_PM_REARM
<< 11);
3701 context
->flags
|= cpu_to_be32(MLX5_QP_PM_ARMED
<< 11);
3706 if ((cur_state
== IB_QPS_RESET
) && (new_state
== IB_QPS_INIT
)) {
3707 if ((ibqp
->qp_type
== IB_QPT_RC
) ||
3708 (ibqp
->qp_type
== IB_QPT_UD
&&
3709 !(qp
->flags
& MLX5_IB_QP_CREATE_SQPN_QP1
)) ||
3710 (ibqp
->qp_type
== IB_QPT_UC
) ||
3711 (ibqp
->qp_type
== IB_QPT_RAW_PACKET
) ||
3712 (ibqp
->qp_type
== IB_QPT_XRC_INI
) ||
3713 (ibqp
->qp_type
== IB_QPT_XRC_TGT
)) {
3714 if (dev
->lag_active
) {
3715 u8 p
= mlx5_core_native_port_num(dev
->mdev
) - 1;
3716 tx_affinity
= get_tx_affinity(dev
, pd
, base
, p
,
3718 context
->flags
|= cpu_to_be32(tx_affinity
<< 24);
3723 if (is_sqp(ibqp
->qp_type
)) {
3724 context
->mtu_msgmax
= (IB_MTU_256
<< 5) | 8;
3725 } else if ((ibqp
->qp_type
== IB_QPT_UD
&&
3726 !(qp
->flags
& IB_QP_CREATE_SOURCE_QPN
)) ||
3727 ibqp
->qp_type
== MLX5_IB_QPT_REG_UMR
) {
3728 context
->mtu_msgmax
= (IB_MTU_4096
<< 5) | 12;
3729 } else if (attr_mask
& IB_QP_PATH_MTU
) {
3730 if (attr
->path_mtu
< IB_MTU_256
||
3731 attr
->path_mtu
> IB_MTU_4096
) {
3732 mlx5_ib_warn(dev
, "invalid mtu %d\n", attr
->path_mtu
);
3736 context
->mtu_msgmax
= (attr
->path_mtu
<< 5) |
3737 (u8
)MLX5_CAP_GEN(dev
->mdev
, log_max_msg
);
3740 if (attr_mask
& IB_QP_DEST_QPN
)
3741 context
->log_pg_sz_remote_qpn
= cpu_to_be32(attr
->dest_qp_num
);
3743 if (attr_mask
& IB_QP_PKEY_INDEX
)
3744 context
->pri_path
.pkey_index
= cpu_to_be16(attr
->pkey_index
);
3746 /* todo implement counter_index functionality */
3748 if (is_sqp(ibqp
->qp_type
))
3749 context
->pri_path
.port
= qp
->port
;
3751 if (attr_mask
& IB_QP_PORT
)
3752 context
->pri_path
.port
= attr
->port_num
;
3754 if (attr_mask
& IB_QP_AV
) {
3755 err
= mlx5_set_path(dev
, qp
, &attr
->ah_attr
, &context
->pri_path
,
3756 attr_mask
& IB_QP_PORT
? attr
->port_num
: qp
->port
,
3757 attr_mask
, 0, attr
, false);
3762 if (attr_mask
& IB_QP_TIMEOUT
)
3763 context
->pri_path
.ackto_lt
|= attr
->timeout
<< 3;
3765 if (attr_mask
& IB_QP_ALT_PATH
) {
3766 err
= mlx5_set_path(dev
, qp
, &attr
->alt_ah_attr
,
3769 attr_mask
| IB_QP_PKEY_INDEX
| IB_QP_TIMEOUT
,
3775 get_cqs(qp
->ibqp
.qp_type
, qp
->ibqp
.send_cq
, qp
->ibqp
.recv_cq
,
3776 &send_cq
, &recv_cq
);
3778 context
->flags_pd
= cpu_to_be32(pd
? pd
->pdn
: to_mpd(dev
->devr
.p0
)->pdn
);
3779 context
->cqn_send
= send_cq
? cpu_to_be32(send_cq
->mcq
.cqn
) : 0;
3780 context
->cqn_recv
= recv_cq
? cpu_to_be32(recv_cq
->mcq
.cqn
) : 0;
3781 context
->params1
= cpu_to_be32(MLX5_IB_ACK_REQ_FREQ
<< 28);
3783 if (attr_mask
& IB_QP_RNR_RETRY
)
3784 context
->params1
|= cpu_to_be32(attr
->rnr_retry
<< 13);
3786 if (attr_mask
& IB_QP_RETRY_CNT
)
3787 context
->params1
|= cpu_to_be32(attr
->retry_cnt
<< 16);
3789 if (attr_mask
& IB_QP_MAX_QP_RD_ATOMIC
) {
3790 if (attr
->max_rd_atomic
)
3792 cpu_to_be32(fls(attr
->max_rd_atomic
- 1) << 21);
3795 if (attr_mask
& IB_QP_SQ_PSN
)
3796 context
->next_send_psn
= cpu_to_be32(attr
->sq_psn
);
3798 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
) {
3799 if (attr
->max_dest_rd_atomic
)
3801 cpu_to_be32(fls(attr
->max_dest_rd_atomic
- 1) << 21);
3804 if (attr_mask
& (IB_QP_ACCESS_FLAGS
| IB_QP_MAX_DEST_RD_ATOMIC
)) {
3805 __be32 access_flags
;
3807 err
= to_mlx5_access_flags(qp
, attr
, attr_mask
, &access_flags
);
3811 context
->params2
|= access_flags
;
3814 if (attr_mask
& IB_QP_MIN_RNR_TIMER
)
3815 context
->rnr_nextrecvpsn
|= cpu_to_be32(attr
->min_rnr_timer
<< 24);
3817 if (attr_mask
& IB_QP_RQ_PSN
)
3818 context
->rnr_nextrecvpsn
|= cpu_to_be32(attr
->rq_psn
);
3820 if (attr_mask
& IB_QP_QKEY
)
3821 context
->qkey
= cpu_to_be32(attr
->qkey
);
3823 if (qp
->rq
.wqe_cnt
&& cur_state
== IB_QPS_RESET
&& new_state
== IB_QPS_INIT
)
3824 context
->db_rec_addr
= cpu_to_be64(qp
->db
.dma
);
3826 if (cur_state
== IB_QPS_RESET
&& new_state
== IB_QPS_INIT
) {
3827 u8 port_num
= (attr_mask
& IB_QP_PORT
? attr
->port_num
:
3830 /* Underlay port should be used - index 0 function per port */
3831 if (qp
->flags
& IB_QP_CREATE_SOURCE_QPN
)
3835 set_id
= ibqp
->counter
->id
;
3837 set_id
= mlx5_ib_get_counters_id(dev
, port_num
);
3838 context
->qp_counter_set_usr_page
|=
3839 cpu_to_be32(set_id
<< 24);
3842 if (!ibqp
->uobject
&& cur_state
== IB_QPS_RESET
&& new_state
== IB_QPS_INIT
)
3843 context
->sq_crq_size
|= cpu_to_be16(1 << 4);
3845 if (qp
->flags
& MLX5_IB_QP_CREATE_SQPN_QP1
)
3846 context
->deth_sqpn
= cpu_to_be32(1);
3848 mlx5_cur
= to_mlx5_state(cur_state
);
3849 mlx5_new
= to_mlx5_state(new_state
);
3851 if (mlx5_cur
>= MLX5_QP_NUM_STATE
|| mlx5_new
>= MLX5_QP_NUM_STATE
||
3852 !optab
[mlx5_cur
][mlx5_new
]) {
3857 op
= optab
[mlx5_cur
][mlx5_new
];
3858 optpar
= ib_mask_to_mlx5_opt(attr_mask
);
3859 optpar
&= opt_mask
[mlx5_cur
][mlx5_new
][mlx5_st
];
3861 if (qp
->ibqp
.qp_type
== IB_QPT_RAW_PACKET
||
3862 qp
->flags
& IB_QP_CREATE_SOURCE_QPN
) {
3863 struct mlx5_modify_raw_qp_param raw_qp_param
= {};
3865 raw_qp_param
.operation
= op
;
3866 if (cur_state
== IB_QPS_RESET
&& new_state
== IB_QPS_INIT
) {
3867 raw_qp_param
.rq_q_ctr_id
= set_id
;
3868 raw_qp_param
.set_mask
|= MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID
;
3871 if (attr_mask
& IB_QP_PORT
)
3872 raw_qp_param
.port
= attr
->port_num
;
3874 if (attr_mask
& IB_QP_RATE_LIMIT
) {
3875 raw_qp_param
.rl
.rate
= attr
->rate_limit
;
3877 if (ucmd
->burst_info
.max_burst_sz
) {
3878 if (attr
->rate_limit
&&
3879 MLX5_CAP_QOS(dev
->mdev
, packet_pacing_burst_bound
)) {
3880 raw_qp_param
.rl
.max_burst_sz
=
3881 ucmd
->burst_info
.max_burst_sz
;
3888 if (ucmd
->burst_info
.typical_pkt_sz
) {
3889 if (attr
->rate_limit
&&
3890 MLX5_CAP_QOS(dev
->mdev
, packet_pacing_typical_size
)) {
3891 raw_qp_param
.rl
.typical_pkt_sz
=
3892 ucmd
->burst_info
.typical_pkt_sz
;
3899 raw_qp_param
.set_mask
|= MLX5_RAW_QP_RATE_LIMIT
;
3902 err
= modify_raw_packet_qp(dev
, qp
, &raw_qp_param
, tx_affinity
);
3904 err
= mlx5_core_qp_modify(dev
, op
, optpar
, context
, &base
->mqp
);
3910 qp
->state
= new_state
;
3912 if (attr_mask
& IB_QP_ACCESS_FLAGS
)
3913 qp
->trans_qp
.atomic_rd_en
= attr
->qp_access_flags
;
3914 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
)
3915 qp
->trans_qp
.resp_depth
= attr
->max_dest_rd_atomic
;
3916 if (attr_mask
& IB_QP_PORT
)
3917 qp
->port
= attr
->port_num
;
3918 if (attr_mask
& IB_QP_ALT_PATH
)
3919 qp
->trans_qp
.alt_port
= attr
->alt_port_num
;
3922 * If we moved a kernel QP to RESET, clean up all old CQ
3923 * entries and reinitialize the QP.
3925 if (new_state
== IB_QPS_RESET
&&
3926 !ibqp
->uobject
&& ibqp
->qp_type
!= IB_QPT_XRC_TGT
) {
3927 mlx5_ib_cq_clean(recv_cq
, base
->mqp
.qpn
,
3928 ibqp
->srq
? to_msrq(ibqp
->srq
) : NULL
);
3929 if (send_cq
!= recv_cq
)
3930 mlx5_ib_cq_clean(send_cq
, base
->mqp
.qpn
, NULL
);
3936 qp
->sq
.cur_post
= 0;
3938 qp
->sq
.cur_edge
= get_sq_edge(&qp
->sq
, 0);
3939 qp
->sq
.last_poll
= 0;
3940 qp
->db
.db
[MLX5_RCV_DBR
] = 0;
3941 qp
->db
.db
[MLX5_SND_DBR
] = 0;
3944 if ((new_state
== IB_QPS_RTS
) && qp
->counter_pending
) {
3945 err
= __mlx5_ib_qp_set_counter(ibqp
, ibqp
->counter
);
3947 qp
->counter_pending
= 0;
3955 static inline bool is_valid_mask(int mask
, int req
, int opt
)
3957 if ((mask
& req
) != req
)
3960 if (mask
& ~(req
| opt
))
3966 /* check valid transition for driver QP types
3967 * for now the only QP type that this function supports is DCI
3969 static bool modify_dci_qp_is_ok(enum ib_qp_state cur_state
, enum ib_qp_state new_state
,
3970 enum ib_qp_attr_mask attr_mask
)
3972 int req
= IB_QP_STATE
;
3975 if (new_state
== IB_QPS_RESET
) {
3976 return is_valid_mask(attr_mask
, req
, opt
);
3977 } else if (cur_state
== IB_QPS_RESET
&& new_state
== IB_QPS_INIT
) {
3978 req
|= IB_QP_PKEY_INDEX
| IB_QP_PORT
;
3979 return is_valid_mask(attr_mask
, req
, opt
);
3980 } else if (cur_state
== IB_QPS_INIT
&& new_state
== IB_QPS_INIT
) {
3981 opt
= IB_QP_PKEY_INDEX
| IB_QP_PORT
;
3982 return is_valid_mask(attr_mask
, req
, opt
);
3983 } else if (cur_state
== IB_QPS_INIT
&& new_state
== IB_QPS_RTR
) {
3984 req
|= IB_QP_PATH_MTU
;
3985 opt
= IB_QP_PKEY_INDEX
| IB_QP_AV
;
3986 return is_valid_mask(attr_mask
, req
, opt
);
3987 } else if (cur_state
== IB_QPS_RTR
&& new_state
== IB_QPS_RTS
) {
3988 req
|= IB_QP_TIMEOUT
| IB_QP_RETRY_CNT
| IB_QP_RNR_RETRY
|
3989 IB_QP_MAX_QP_RD_ATOMIC
| IB_QP_SQ_PSN
;
3990 opt
= IB_QP_MIN_RNR_TIMER
;
3991 return is_valid_mask(attr_mask
, req
, opt
);
3992 } else if (cur_state
== IB_QPS_RTS
&& new_state
== IB_QPS_RTS
) {
3993 opt
= IB_QP_MIN_RNR_TIMER
;
3994 return is_valid_mask(attr_mask
, req
, opt
);
3995 } else if (cur_state
!= IB_QPS_RESET
&& new_state
== IB_QPS_ERR
) {
3996 return is_valid_mask(attr_mask
, req
, opt
);
4001 /* mlx5_ib_modify_dct: modify a DCT QP
4002 * valid transitions are:
4003 * RESET to INIT: must set access_flags, pkey_index and port
4004 * INIT to RTR : must set min_rnr_timer, tclass, flow_label,
4005 * mtu, gid_index and hop_limit
4006 * Other transitions and attributes are illegal
4008 static int mlx5_ib_modify_dct(struct ib_qp
*ibqp
, struct ib_qp_attr
*attr
,
4009 int attr_mask
, struct ib_udata
*udata
)
4011 struct mlx5_ib_qp
*qp
= to_mqp(ibqp
);
4012 struct mlx5_ib_dev
*dev
= to_mdev(ibqp
->device
);
4013 enum ib_qp_state cur_state
, new_state
;
4015 int required
= IB_QP_STATE
;
4018 if (!(attr_mask
& IB_QP_STATE
))
4021 cur_state
= qp
->state
;
4022 new_state
= attr
->qp_state
;
4024 dctc
= MLX5_ADDR_OF(create_dct_in
, qp
->dct
.in
, dct_context_entry
);
4025 if (cur_state
== IB_QPS_RESET
&& new_state
== IB_QPS_INIT
) {
4028 required
|= IB_QP_ACCESS_FLAGS
| IB_QP_PKEY_INDEX
| IB_QP_PORT
;
4029 if (!is_valid_mask(attr_mask
, required
, 0))
4032 if (attr
->port_num
== 0 ||
4033 attr
->port_num
> MLX5_CAP_GEN(dev
->mdev
, num_ports
)) {
4034 mlx5_ib_dbg(dev
, "invalid port number %d. number of ports is %d\n",
4035 attr
->port_num
, dev
->num_ports
);
4038 if (attr
->qp_access_flags
& IB_ACCESS_REMOTE_READ
)
4039 MLX5_SET(dctc
, dctc
, rre
, 1);
4040 if (attr
->qp_access_flags
& IB_ACCESS_REMOTE_WRITE
)
4041 MLX5_SET(dctc
, dctc
, rwe
, 1);
4042 if (attr
->qp_access_flags
& IB_ACCESS_REMOTE_ATOMIC
) {
4045 atomic_mode
= get_atomic_mode(dev
, MLX5_IB_QPT_DCT
);
4046 if (atomic_mode
< 0)
4049 MLX5_SET(dctc
, dctc
, atomic_mode
, atomic_mode
);
4050 MLX5_SET(dctc
, dctc
, rae
, 1);
4052 MLX5_SET(dctc
, dctc
, pkey_index
, attr
->pkey_index
);
4053 MLX5_SET(dctc
, dctc
, port
, attr
->port_num
);
4055 set_id
= mlx5_ib_get_counters_id(dev
, attr
->port_num
- 1);
4056 MLX5_SET(dctc
, dctc
, counter_set_id
, set_id
);
4058 } else if (cur_state
== IB_QPS_INIT
&& new_state
== IB_QPS_RTR
) {
4059 struct mlx5_ib_modify_qp_resp resp
= {};
4060 u32 out
[MLX5_ST_SZ_DW(create_dct_out
)] = {0};
4061 u32 min_resp_len
= offsetof(typeof(resp
), dctn
) +
4064 if (udata
->outlen
< min_resp_len
)
4066 resp
.response_length
= min_resp_len
;
4068 required
|= IB_QP_MIN_RNR_TIMER
| IB_QP_AV
| IB_QP_PATH_MTU
;
4069 if (!is_valid_mask(attr_mask
, required
, 0))
4071 MLX5_SET(dctc
, dctc
, min_rnr_nak
, attr
->min_rnr_timer
);
4072 MLX5_SET(dctc
, dctc
, tclass
, attr
->ah_attr
.grh
.traffic_class
);
4073 MLX5_SET(dctc
, dctc
, flow_label
, attr
->ah_attr
.grh
.flow_label
);
4074 MLX5_SET(dctc
, dctc
, mtu
, attr
->path_mtu
);
4075 MLX5_SET(dctc
, dctc
, my_addr_index
, attr
->ah_attr
.grh
.sgid_index
);
4076 MLX5_SET(dctc
, dctc
, hop_limit
, attr
->ah_attr
.grh
.hop_limit
);
4078 err
= mlx5_core_create_dct(dev
, &qp
->dct
.mdct
, qp
->dct
.in
,
4079 MLX5_ST_SZ_BYTES(create_dct_in
), out
,
4083 resp
.dctn
= qp
->dct
.mdct
.mqp
.qpn
;
4084 err
= ib_copy_to_udata(udata
, &resp
, resp
.response_length
);
4086 mlx5_core_destroy_dct(dev
, &qp
->dct
.mdct
);
4090 mlx5_ib_warn(dev
, "Modify DCT: Invalid transition from %d to %d\n", cur_state
, new_state
);
4094 qp
->state
= IB_QPS_ERR
;
4096 qp
->state
= new_state
;
4100 int mlx5_ib_modify_qp(struct ib_qp
*ibqp
, struct ib_qp_attr
*attr
,
4101 int attr_mask
, struct ib_udata
*udata
)
4103 struct mlx5_ib_dev
*dev
= to_mdev(ibqp
->device
);
4104 struct mlx5_ib_qp
*qp
= to_mqp(ibqp
);
4105 struct mlx5_ib_modify_qp ucmd
= {};
4106 enum ib_qp_type qp_type
;
4107 enum ib_qp_state cur_state
, new_state
;
4108 size_t required_cmd_sz
;
4112 if (ibqp
->rwq_ind_tbl
)
4115 if (udata
&& udata
->inlen
) {
4116 required_cmd_sz
= offsetof(typeof(ucmd
), reserved
) +
4117 sizeof(ucmd
.reserved
);
4118 if (udata
->inlen
< required_cmd_sz
)
4121 if (udata
->inlen
> sizeof(ucmd
) &&
4122 !ib_is_udata_cleared(udata
, sizeof(ucmd
),
4123 udata
->inlen
- sizeof(ucmd
)))
4126 if (ib_copy_from_udata(&ucmd
, udata
,
4127 min(udata
->inlen
, sizeof(ucmd
))))
4130 if (ucmd
.comp_mask
||
4131 memchr_inv(&ucmd
.reserved
, 0, sizeof(ucmd
.reserved
)) ||
4132 memchr_inv(&ucmd
.burst_info
.reserved
, 0,
4133 sizeof(ucmd
.burst_info
.reserved
)))
4137 if (unlikely(ibqp
->qp_type
== IB_QPT_GSI
))
4138 return mlx5_ib_gsi_modify_qp(ibqp
, attr
, attr_mask
);
4140 qp_type
= (unlikely(ibqp
->qp_type
== MLX5_IB_QPT_HW_GSI
)) ? IB_QPT_GSI
:
4143 if (qp_type
== MLX5_IB_QPT_DCT
)
4144 return mlx5_ib_modify_dct(ibqp
, attr
, attr_mask
, udata
);
4146 mutex_lock(&qp
->mutex
);
4148 cur_state
= attr_mask
& IB_QP_CUR_STATE
? attr
->cur_qp_state
: qp
->state
;
4149 new_state
= attr_mask
& IB_QP_STATE
? attr
->qp_state
: cur_state
;
4151 if (!(cur_state
== new_state
&& cur_state
== IB_QPS_RESET
)) {
4152 port
= attr_mask
& IB_QP_PORT
? attr
->port_num
: qp
->port
;
4155 if (qp
->flags
& IB_QP_CREATE_SOURCE_QPN
) {
4156 if (attr_mask
& ~(IB_QP_STATE
| IB_QP_CUR_STATE
)) {
4157 mlx5_ib_dbg(dev
, "invalid attr_mask 0x%x when underlay QP is used\n",
4161 } else if (qp_type
!= MLX5_IB_QPT_REG_UMR
&&
4162 qp_type
!= MLX5_IB_QPT_DCI
&&
4163 !ib_modify_qp_is_ok(cur_state
, new_state
, qp_type
,
4165 mlx5_ib_dbg(dev
, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
4166 cur_state
, new_state
, ibqp
->qp_type
, attr_mask
);
4168 } else if (qp_type
== MLX5_IB_QPT_DCI
&&
4169 !modify_dci_qp_is_ok(cur_state
, new_state
, attr_mask
)) {
4170 mlx5_ib_dbg(dev
, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
4171 cur_state
, new_state
, qp_type
, attr_mask
);
4175 if ((attr_mask
& IB_QP_PORT
) &&
4176 (attr
->port_num
== 0 ||
4177 attr
->port_num
> dev
->num_ports
)) {
4178 mlx5_ib_dbg(dev
, "invalid port number %d. number of ports is %d\n",
4179 attr
->port_num
, dev
->num_ports
);
4183 if (attr_mask
& IB_QP_PKEY_INDEX
) {
4184 port
= attr_mask
& IB_QP_PORT
? attr
->port_num
: qp
->port
;
4185 if (attr
->pkey_index
>=
4186 dev
->mdev
->port_caps
[port
- 1].pkey_table_len
) {
4187 mlx5_ib_dbg(dev
, "invalid pkey index %d\n",
4193 if (attr_mask
& IB_QP_MAX_QP_RD_ATOMIC
&&
4194 attr
->max_rd_atomic
>
4195 (1 << MLX5_CAP_GEN(dev
->mdev
, log_max_ra_res_qp
))) {
4196 mlx5_ib_dbg(dev
, "invalid max_rd_atomic value %d\n",
4197 attr
->max_rd_atomic
);
4201 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
&&
4202 attr
->max_dest_rd_atomic
>
4203 (1 << MLX5_CAP_GEN(dev
->mdev
, log_max_ra_req_qp
))) {
4204 mlx5_ib_dbg(dev
, "invalid max_dest_rd_atomic value %d\n",
4205 attr
->max_dest_rd_atomic
);
4209 if (cur_state
== new_state
&& cur_state
== IB_QPS_RESET
) {
4214 err
= __mlx5_ib_modify_qp(ibqp
, attr
, attr_mask
, cur_state
,
4215 new_state
, &ucmd
, udata
);
4218 mutex_unlock(&qp
->mutex
);
4222 static void _handle_post_send_edge(struct mlx5_ib_wq
*sq
, void **seg
,
4223 u32 wqe_sz
, void **cur_edge
)
4227 idx
= (sq
->cur_post
+ (wqe_sz
>> 2)) & (sq
->wqe_cnt
- 1);
4228 *cur_edge
= get_sq_edge(sq
, idx
);
4230 *seg
= mlx5_frag_buf_get_wqe(&sq
->fbc
, idx
);
4233 /* handle_post_send_edge - Check if we get to SQ edge. If yes, update to the
4234 * next nearby edge and get new address translation for current WQE position.
4236 * @seg: Current WQE position (16B aligned).
4237 * @wqe_sz: Total current WQE size [16B].
4238 * @cur_edge: Updated current edge.
4240 static inline void handle_post_send_edge(struct mlx5_ib_wq
*sq
, void **seg
,
4241 u32 wqe_sz
, void **cur_edge
)
4243 if (likely(*seg
!= *cur_edge
))
4246 _handle_post_send_edge(sq
, seg
, wqe_sz
, cur_edge
);
4249 /* memcpy_send_wqe - copy data from src to WQE and update the relevant WQ's
4250 * pointers. At the end @seg is aligned to 16B regardless the copied size.
4252 * @cur_edge: Updated current edge.
4253 * @seg: Current WQE position (16B aligned).
4254 * @wqe_sz: Total current WQE size [16B].
4255 * @src: Pointer to copy from.
4256 * @n: Number of bytes to copy.
4258 static inline void memcpy_send_wqe(struct mlx5_ib_wq
*sq
, void **cur_edge
,
4259 void **seg
, u32
*wqe_sz
, const void *src
,
4263 size_t leftlen
= *cur_edge
- *seg
;
4264 size_t copysz
= min_t(size_t, leftlen
, n
);
4267 memcpy(*seg
, src
, copysz
);
4271 stride
= !n
? ALIGN(copysz
, 16) : copysz
;
4273 *wqe_sz
+= stride
>> 4;
4274 handle_post_send_edge(sq
, seg
, *wqe_sz
, cur_edge
);
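/*
 * mlx5_wq_overflow() checks whether nreq more WQEs fit in the work queue.
 * The fast path compares head and tail without locking; only when the ring
 * looks full is the owning CQ lock taken to get a stable view.
 */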
static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
{
	struct mlx5_ib_cq *cq;
	unsigned int cur;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max_post))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max_post;
}

static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->raddr = cpu_to_be64(remote_addr);
	rseg->rkey = cpu_to_be32(rkey);
	rseg->reserved = 0;
}

static void set_eth_seg(const struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
			void **seg, int *size, void **cur_edge)
{
	struct mlx5_wqe_eth_seg *eseg = *seg;

	memset(eseg, 0, sizeof(struct mlx5_wqe_eth_seg));

	if (wr->send_flags & IB_SEND_IP_CSUM)
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM |
				 MLX5_ETH_WQE_L4_CSUM;

	if (wr->opcode == IB_WR_LSO) {
		struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr);
		size_t left, copysz;
		void *pdata = ud_wr->header;
		size_t stride;

		left = ud_wr->hlen;
		eseg->mss = cpu_to_be16(ud_wr->mss);
		eseg->inline_hdr.sz = cpu_to_be16(left);

		/* memcpy_send_wqe should get a 16B align address. Hence, we
		 * first copy up to the current edge and then, if needed,
		 * fall-through to memcpy_send_wqe.
		 */
		copysz = min_t(u64, *cur_edge - (void *)eseg->inline_hdr.start,
			       left);
		memcpy(eseg->inline_hdr.start, pdata, copysz);
		stride = ALIGN(sizeof(struct mlx5_wqe_eth_seg) -
			       sizeof(eseg->inline_hdr.start) + copysz, 16);
		*size += stride / 16;
		*seg += stride;

		if (copysz < left) {
			handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
			left -= copysz;
			pdata += copysz;
			memcpy_send_wqe(&qp->sq, cur_edge, seg, size, pdata,
					left);
		}

		return;
	}

	*seg += sizeof(struct mlx5_wqe_eth_seg);
	*size += sizeof(struct mlx5_wqe_eth_seg) / 16;
}
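/*
 * Note: for LSO the inline packet header is copied in two steps: up to the
 * current fragment edge directly into eseg->inline_hdr.start, and the
 * remainder (if any) via memcpy_send_wqe(), which expects a 16B-aligned
 * destination.
 */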
static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
			     const struct ib_send_wr *wr)
{
	memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
	dseg->av.dqp_dct = cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
	dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey);
}

static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->byte_count = cpu_to_be32(sg->length);
	dseg->lkey       = cpu_to_be32(sg->lkey);
	dseg->addr       = cpu_to_be64(sg->addr);
}

static u64 get_xlt_octo(u64 bytes)
{
	return ALIGN(bytes, MLX5_IB_UMR_XLT_ALIGNMENT) /
	       MLX5_IB_UMR_OCTOWORD;
}
static __be64 frwr_mkey_mask(bool atomic)
{
	u64 result;

	result = MLX5_MKEY_MASK_LEN		|
		MLX5_MKEY_MASK_PAGE_SIZE	|
		MLX5_MKEY_MASK_START_ADDR	|
		MLX5_MKEY_MASK_EN_RINVAL	|
		MLX5_MKEY_MASK_KEY		|
		MLX5_MKEY_MASK_LR		|
		MLX5_MKEY_MASK_LW		|
		MLX5_MKEY_MASK_RR		|
		MLX5_MKEY_MASK_RW		|
		MLX5_MKEY_MASK_SMALL_FENCE	|
		MLX5_MKEY_MASK_FREE;

	if (atomic)
		result |= MLX5_MKEY_MASK_A;

	return cpu_to_be64(result);
}

static __be64 sig_mkey_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_LEN		|
		MLX5_MKEY_MASK_PAGE_SIZE	|
		MLX5_MKEY_MASK_START_ADDR	|
		MLX5_MKEY_MASK_EN_SIGERR	|
		MLX5_MKEY_MASK_EN_RINVAL	|
		MLX5_MKEY_MASK_KEY		|
		MLX5_MKEY_MASK_LR		|
		MLX5_MKEY_MASK_LW		|
		MLX5_MKEY_MASK_RR		|
		MLX5_MKEY_MASK_RW		|
		MLX5_MKEY_MASK_SMALL_FENCE	|
		MLX5_MKEY_MASK_FREE		|
		MLX5_MKEY_MASK_BSF_EN;

	return cpu_to_be64(result);
}

static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
			    struct mlx5_ib_mr *mr, u8 flags, bool atomic)
{
	int size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;

	memset(umr, 0, sizeof(*umr));

	umr->flags = flags;
	umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
	umr->mkey_mask = frwr_mkey_mask(atomic);
}

static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
{
	memset(umr, 0, sizeof(*umr));
	umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
	umr->flags = MLX5_UMR_INLINE;
}

static __be64 get_umr_enable_mr_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_KEY |
		 MLX5_MKEY_MASK_FREE;

	return cpu_to_be64(result);
}

static __be64 get_umr_disable_mr_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_FREE;

	return cpu_to_be64(result);
}

static __be64 get_umr_update_translation_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_LEN |
		 MLX5_MKEY_MASK_PAGE_SIZE |
		 MLX5_MKEY_MASK_START_ADDR;

	return cpu_to_be64(result);
}

static __be64 get_umr_update_access_mask(int atomic)
{
	u64 result;

	result = MLX5_MKEY_MASK_LR |
		 MLX5_MKEY_MASK_LW |
		 MLX5_MKEY_MASK_RR |
		 MLX5_MKEY_MASK_RW;

	if (atomic)
		result |= MLX5_MKEY_MASK_A;

	return cpu_to_be64(result);
}

static __be64 get_umr_update_pd_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_PD;

	return cpu_to_be64(result);
}

static int umr_check_mkey_mask(struct mlx5_ib_dev *dev, u64 mask)
{
	if ((mask & MLX5_MKEY_MASK_PAGE_SIZE &&
	     MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled)) ||
	    (mask & MLX5_MKEY_MASK_A &&
	     MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled)))
		return -EPERM;
	return 0;
}
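/*
 * Note: umr_check_mkey_mask() rejects a UMR WQE that tries to modify the
 * entity size or the atomic access of an mkey when the corresponding HCA
 * capability (umr_modify_entity_size_disabled / umr_modify_atomic_disabled)
 * forbids it.
 */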
static int set_reg_umr_segment(struct mlx5_ib_dev *dev,
			       struct mlx5_wqe_umr_ctrl_seg *umr,
			       const struct ib_send_wr *wr, int atomic)
{
	const struct mlx5_umr_wr *umrwr = umr_wr(wr);

	memset(umr, 0, sizeof(*umr));

	if (!umrwr->ignore_free_state) {
		if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
			 /* fail if free */
			umr->flags = MLX5_UMR_CHECK_FREE;
		else
			/* fail if not free */
			umr->flags = MLX5_UMR_CHECK_NOT_FREE;
	}

	umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size));
	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) {
		u64 offset = get_xlt_octo(umrwr->offset);

		umr->xlt_offset = cpu_to_be16(offset & 0xffff);
		umr->xlt_offset_47_16 = cpu_to_be32(offset >> 16);
		umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
	}
	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION)
		umr->mkey_mask |= get_umr_update_translation_mask();
	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS) {
		umr->mkey_mask |= get_umr_update_access_mask(atomic);
		umr->mkey_mask |= get_umr_update_pd_mask();
	}
	if (wr->send_flags & MLX5_IB_SEND_UMR_ENABLE_MR)
		umr->mkey_mask |= get_umr_enable_mr_mask();
	if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
		umr->mkey_mask |= get_umr_disable_mr_mask();

	if (!wr->num_sge)
		umr->flags |= MLX5_UMR_INLINE;

	return umr_check_mkey_mask(dev, be64_to_cpu(umr->mkey_mask));
}
static u8 get_umr_flags(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
		MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN;
}

static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg,
			     struct mlx5_ib_mr *mr,
			     u32 key, int access)
{
	int ndescs = ALIGN(mr->ndescs + mr->meta_ndescs, 8) >> 1;

	memset(seg, 0, sizeof(*seg));

	if (mr->access_mode == MLX5_MKC_ACCESS_MODE_MTT)
		seg->log2_page_size = ilog2(mr->ibmr.page_size);
	else if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
		/* KLMs take twice the size of MTTs */
		ndescs *= 2;

	seg->flags = get_umr_flags(access) | mr->access_mode;
	seg->qpn_mkey7_0 = cpu_to_be32((key & 0xff) | 0xffffff00);
	seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
	seg->start_addr = cpu_to_be64(mr->ibmr.iova);
	seg->len = cpu_to_be64(mr->ibmr.length);
	seg->xlt_oct_size = cpu_to_be32(ndescs);
}
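/*
 * Note: a KLM descriptor is twice the size of an MTT, so for
 * MLX5_MKC_ACCESS_MODE_KLMS the descriptor count programmed into
 * xlt_oct_size above is doubled relative to the MTT case.
 */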
static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg)
{
	memset(seg, 0, sizeof(*seg));
	seg->status = MLX5_MKEY_STATUS_FREE;
}

static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg,
				 const struct ib_send_wr *wr)
{
	const struct mlx5_umr_wr *umrwr = umr_wr(wr);

	memset(seg, 0, sizeof(*seg));
	if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
		seg->status = MLX5_MKEY_STATUS_FREE;

	seg->flags = convert_access(umrwr->access_flags);
	seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION &&
	    !umrwr->length)
		seg->flags_pd |= cpu_to_be32(MLX5_MKEY_LEN64);

	seg->start_addr = cpu_to_be64(umrwr->virt_addr);
	seg->len = cpu_to_be64(umrwr->length);
	seg->log2_page_size = umrwr->page_shift;
	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
				       mlx5_mkey_variant(umrwr->mkey));
}
static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg,
			     struct mlx5_ib_mr *mr,
			     struct mlx5_ib_pd *pd)
{
	int bcount = mr->desc_size * (mr->ndescs + mr->meta_ndescs);

	dseg->addr = cpu_to_be64(mr->desc_map);
	dseg->byte_count = cpu_to_be32(ALIGN(bcount, 64));
	dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
}

static __be32 send_ieth(const struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return wr->ex.imm_data;

	case IB_WR_SEND_WITH_INV:
		return cpu_to_be32(wr->ex.invalidate_rkey);

	default:
		return 0;
	}
}

static u8 calc_sig(void *wqe, int size)
{
	u8 *p = wqe;
	u8 res = 0;
	int i;

	for (i = 0; i < size; i++)
		res ^= p[i];

	return ~res;
}

static u8 wq_sig(void *wqe)
{
	return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
}
static int set_data_inl_seg(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
			    void **wqe, int *wqe_sz, void **cur_edge)
{
	struct mlx5_wqe_inline_seg *seg;
	size_t offset;
	int inl = 0;
	int i;

	seg = *wqe;
	*wqe += sizeof(*seg);
	offset = sizeof(*seg);

	for (i = 0; i < wr->num_sge; i++) {
		size_t len  = wr->sg_list[i].length;
		void *addr = (void *)(unsigned long)(wr->sg_list[i].addr);

		inl += len;

		if (unlikely(inl > qp->max_inline_data))
			return -ENOMEM;

		while (likely(len)) {
			size_t leftlen;
			size_t copysz;

			handle_post_send_edge(&qp->sq, wqe,
					      *wqe_sz + (offset >> 4),
					      cur_edge);

			leftlen = *cur_edge - *wqe;
			copysz = min_t(size_t, leftlen, len);

			memcpy(*wqe, addr, copysz);
			len -= copysz;
			addr += copysz;
			*wqe += copysz;
			offset += copysz;
		}
	}

	seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);

	*wqe_sz += ALIGN(inl + sizeof(seg->byte_count), 16) / 16;

	return 0;
}

static u16 prot_field_size(enum ib_signature_type type)
{
	switch (type) {
	case IB_SIG_TYPE_T10_DIF:
		return MLX5_DIF_SIZE;
	default:
		return 0;
	}
}

static u8 bs_selector(int block_size)
{
	switch (block_size) {
	case 512:	    return 0x1;
	case 520:	    return 0x2;
	case 4096:	    return 0x3;
	case 4160:	    return 0x4;
	case 1073741824:    return 0x5;
	default:	    return 0;
	}
}
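/*
 * Note: bs_selector() encodes the T10-DIF protection interval for the BSF;
 * block sizes of 512, 520, 4096 and 4160 bytes and 1GB map to selectors
 * 0x1-0x5, and any other block size is unsupported and yields 0.
 */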
static void mlx5_fill_inl_bsf(struct ib_sig_domain *domain,
			      struct mlx5_bsf_inl *inl)
{
	/* Valid inline section and allow BSF refresh */
	inl->vld_refresh = cpu_to_be16(MLX5_BSF_INL_VALID |
				       MLX5_BSF_REFRESH_DIF);
	inl->dif_apptag = cpu_to_be16(domain->sig.dif.app_tag);
	inl->dif_reftag = cpu_to_be32(domain->sig.dif.ref_tag);
	/* repeating block */
	inl->rp_inv_seed = MLX5_BSF_REPEAT_BLOCK;
	inl->sig_type = domain->sig.dif.bg_type == IB_T10DIF_CRC ?
			MLX5_DIF_CRC : MLX5_DIF_IPCS;

	if (domain->sig.dif.ref_remap)
		inl->dif_inc_ref_guard_check |= MLX5_BSF_INC_REFTAG;

	if (domain->sig.dif.app_escape) {
		if (domain->sig.dif.ref_escape)
			inl->dif_inc_ref_guard_check |= MLX5_BSF_APPREF_ESCAPE;
		else
			inl->dif_inc_ref_guard_check |= MLX5_BSF_APPTAG_ESCAPE;
	}

	inl->dif_app_bitmask_check =
		cpu_to_be16(domain->sig.dif.apptag_check_mask);
}
static int mlx5_set_bsf(struct ib_mr *sig_mr,
			struct ib_sig_attrs *sig_attrs,
			struct mlx5_bsf *bsf, u32 data_size)
{
	struct mlx5_core_sig_ctx *msig = to_mmr(sig_mr)->sig;
	struct mlx5_bsf_basic *basic = &bsf->basic;
	struct ib_sig_domain *mem = &sig_attrs->mem;
	struct ib_sig_domain *wire = &sig_attrs->wire;

	memset(bsf, 0, sizeof(*bsf));

	/* Basic + Extended + Inline */
	basic->bsf_size_sbs = 1 << 7;
	/* Input domain check byte mask */
	basic->check_byte_mask = sig_attrs->check_mask;
	basic->raw_data_size = cpu_to_be32(data_size);

	/* Memory domain */
	switch (sig_attrs->mem.sig_type) {
	case IB_SIG_TYPE_NONE:
		break;
	case IB_SIG_TYPE_T10_DIF:
		basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval);
		basic->m_bfs_psv = cpu_to_be32(msig->psv_memory.psv_idx);
		mlx5_fill_inl_bsf(mem, &bsf->m_inl);
		break;
	default:
		return -EINVAL;
	}

	/* Wire domain */
	switch (sig_attrs->wire.sig_type) {
	case IB_SIG_TYPE_NONE:
		break;
	case IB_SIG_TYPE_T10_DIF:
		if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval &&
		    mem->sig_type == wire->sig_type) {
			/* Same block structure */
			basic->bsf_size_sbs |= 1 << 4;
			if (mem->sig.dif.bg_type == wire->sig.dif.bg_type)
				basic->wire.copy_byte_mask |= MLX5_CPY_GRD_MASK;
			if (mem->sig.dif.app_tag == wire->sig.dif.app_tag)
				basic->wire.copy_byte_mask |= MLX5_CPY_APP_MASK;
			if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag)
				basic->wire.copy_byte_mask |= MLX5_CPY_REF_MASK;
		} else
			basic->wire.bs_selector =
				bs_selector(wire->sig.dif.pi_interval);

		basic->w_bfs_psv = cpu_to_be32(msig->psv_wire.psv_idx);
		mlx5_fill_inl_bsf(wire, &bsf->w_inl);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
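/*
 * Note: mlx5_set_bsf() programs the memory and wire signature domains
 * independently.  When both domains use the same block structure, the
 * copy_byte_mask lets the HCA carry the guard/app/ref tags across unchanged
 * instead of regenerating them.
 */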
4797 static int set_sig_data_segment(const struct ib_send_wr
*send_wr
,
4798 struct ib_mr
*sig_mr
,
4799 struct ib_sig_attrs
*sig_attrs
,
4800 struct mlx5_ib_qp
*qp
, void **seg
, int *size
,
4803 struct mlx5_bsf
*bsf
;
4813 struct mlx5_ib_mr
*mr
= to_mmr(sig_mr
);
4814 struct mlx5_ib_mr
*pi_mr
= mr
->pi_mr
;
4816 data_len
= pi_mr
->data_length
;
4817 data_key
= pi_mr
->ibmr
.lkey
;
4818 data_va
= pi_mr
->data_iova
;
4819 if (pi_mr
->meta_ndescs
) {
4820 prot_len
= pi_mr
->meta_length
;
4821 prot_key
= pi_mr
->ibmr
.lkey
;
4822 prot_va
= pi_mr
->pi_iova
;
4826 if (!prot
|| (data_key
== prot_key
&& data_va
== prot_va
&&
4827 data_len
== prot_len
)) {
4829 * Source domain doesn't contain signature information
4830 * or data and protection are interleaved in memory.
4831 * So need construct:
4832 * ------------------
4834 * ------------------
4836 * ------------------
4838 struct mlx5_klm
*data_klm
= *seg
;
4840 data_klm
->bcount
= cpu_to_be32(data_len
);
4841 data_klm
->key
= cpu_to_be32(data_key
);
4842 data_klm
->va
= cpu_to_be64(data_va
);
4843 wqe_size
= ALIGN(sizeof(*data_klm
), 64);
4846 * Source domain contains signature information
4847 * So need construct a strided block format:
4848 * ---------------------------
4849 * | stride_block_ctrl |
4850 * ---------------------------
4852 * ---------------------------
4854 * ---------------------------
4856 * ---------------------------
4858 struct mlx5_stride_block_ctrl_seg
*sblock_ctrl
;
4859 struct mlx5_stride_block_entry
*data_sentry
;
4860 struct mlx5_stride_block_entry
*prot_sentry
;
4861 u16 block_size
= sig_attrs
->mem
.sig
.dif
.pi_interval
;
4865 data_sentry
= (void *)sblock_ctrl
+ sizeof(*sblock_ctrl
);
4866 prot_sentry
= (void *)data_sentry
+ sizeof(*data_sentry
);
4868 prot_size
= prot_field_size(sig_attrs
->mem
.sig_type
);
4870 pr_err("Bad block size given: %u\n", block_size
);
4873 sblock_ctrl
->bcount_per_cycle
= cpu_to_be32(block_size
+
4875 sblock_ctrl
->op
= cpu_to_be32(MLX5_STRIDE_BLOCK_OP
);
4876 sblock_ctrl
->repeat_count
= cpu_to_be32(data_len
/ block_size
);
4877 sblock_ctrl
->num_entries
= cpu_to_be16(2);
4879 data_sentry
->bcount
= cpu_to_be16(block_size
);
4880 data_sentry
->key
= cpu_to_be32(data_key
);
4881 data_sentry
->va
= cpu_to_be64(data_va
);
4882 data_sentry
->stride
= cpu_to_be16(block_size
);
4884 prot_sentry
->bcount
= cpu_to_be16(prot_size
);
4885 prot_sentry
->key
= cpu_to_be32(prot_key
);
4886 prot_sentry
->va
= cpu_to_be64(prot_va
);
4887 prot_sentry
->stride
= cpu_to_be16(prot_size
);
4889 wqe_size
= ALIGN(sizeof(*sblock_ctrl
) + sizeof(*data_sentry
) +
4890 sizeof(*prot_sentry
), 64);
4894 *size
+= wqe_size
/ 16;
4895 handle_post_send_edge(&qp
->sq
, seg
, *size
, cur_edge
);
4898 ret
= mlx5_set_bsf(sig_mr
, sig_attrs
, bsf
, data_len
);
4902 *seg
+= sizeof(*bsf
);
4903 *size
+= sizeof(*bsf
) / 16;
4904 handle_post_send_edge(&qp
->sq
, seg
, *size
, cur_edge
);
static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
				 struct ib_mr *sig_mr, int access_flags,
				 u32 size, u32 length, u32 pdn)
{
	u32 sig_key = sig_mr->rkey;
	u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1;

	memset(seg, 0, sizeof(*seg));

	seg->flags = get_umr_flags(access_flags) | MLX5_MKC_ACCESS_MODE_KLMS;
	seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
	seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
				    MLX5_MKEY_BSF_EN | pdn);
	seg->len = cpu_to_be64(length);
	seg->xlt_oct_size = cpu_to_be32(get_xlt_octo(size));
	seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
}

static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
				u32 size)
{
	memset(umr, 0, sizeof(*umr));

	umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE;
	umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
	umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE);
	umr->mkey_mask = sig_mkey_mask();
}
static int set_pi_umr_wr(const struct ib_send_wr *send_wr,
			 struct mlx5_ib_qp *qp, void **seg, int *size,
			 void **cur_edge)
{
	const struct ib_reg_wr *wr = reg_wr(send_wr);
	struct mlx5_ib_mr *sig_mr = to_mmr(wr->mr);
	struct mlx5_ib_mr *pi_mr = sig_mr->pi_mr;
	struct ib_sig_attrs *sig_attrs = sig_mr->ibmr.sig_attrs;
	u32 pdn = get_pd(qp)->pdn;
	u32 xlt_size;
	int region_len, ret;

	if (unlikely(send_wr->num_sge != 0) ||
	    unlikely(wr->access & IB_ACCESS_REMOTE_ATOMIC) ||
	    unlikely(!sig_mr->sig) || unlikely(!qp->ibqp.integrity_en) ||
	    unlikely(!sig_mr->sig->sig_status_checked))
		return -EINVAL;

	/* length of the protected region, data + protection */
	region_len = pi_mr->ibmr.length;

	/**
	 * KLM octoword size - if protection was provided
	 * then we use strided block format (3 octowords),
	 * else we use single KLM (1 octoword)
	 **/
	if (sig_attrs->mem.sig_type != IB_SIG_TYPE_NONE)
		xlt_size = 0x30;
	else
		xlt_size = sizeof(struct mlx5_klm);

	set_sig_umr_segment(*seg, xlt_size);
	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);

	set_sig_mkey_segment(*seg, wr->mr, wr->access, xlt_size, region_len,
			     pdn);
	*seg += sizeof(struct mlx5_mkey_seg);
	*size += sizeof(struct mlx5_mkey_seg) / 16;
	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);

	ret = set_sig_data_segment(send_wr, wr->mr, sig_attrs, qp, seg, size,
				   cur_edge);
	if (ret)
		return ret;

	sig_mr->sig->sig_status_checked = false;
	return 0;
}
static int set_psv_wr(struct ib_sig_domain *domain,
		      u32 psv_idx, void **seg, int *size)
{
	struct mlx5_seg_set_psv *psv_seg = *seg;

	memset(psv_seg, 0, sizeof(*psv_seg));
	psv_seg->psv_num = cpu_to_be32(psv_idx);
	switch (domain->sig_type) {
	case IB_SIG_TYPE_NONE:
		break;
	case IB_SIG_TYPE_T10_DIF:
		psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 |
						     domain->sig.dif.app_tag);
		psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag);
		break;
	default:
		pr_err("Bad signature type (%d) is given.\n",
		       domain->sig_type);
		return -EINVAL;
	}

	*seg += sizeof(*psv_seg);
	*size += sizeof(*psv_seg) / 16;

	return 0;
}
static int set_reg_wr(struct mlx5_ib_qp *qp,
		      const struct ib_reg_wr *wr,
		      void **seg, int *size, void **cur_edge,
		      bool check_not_free)
{
	struct mlx5_ib_mr *mr = to_mmr(wr->mr);
	struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
	struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
	int mr_list_size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
	bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD;
	bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC;
	u8 flags = 0;

	if (!mlx5_ib_can_use_umr(dev, atomic, wr->access)) {
		mlx5_ib_warn(to_mdev(qp->ibqp.device),
			     "Fast update of %s for MR is disabled\n",
			     (MLX5_CAP_GEN(dev->mdev,
					   umr_modify_entity_size_disabled)) ?
				     "entity size" :
				     "atomic access");
		return -EINVAL;
	}

	if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
		mlx5_ib_warn(to_mdev(qp->ibqp.device),
			     "Invalid IB_SEND_INLINE send flag\n");
		return -EINVAL;
	}

	if (check_not_free)
		flags |= MLX5_UMR_CHECK_NOT_FREE;
	if (umr_inline)
		flags |= MLX5_UMR_INLINE;

	set_reg_umr_seg(*seg, mr, flags, atomic);
	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);

	set_reg_mkey_seg(*seg, mr, wr->key, wr->access);
	*seg += sizeof(struct mlx5_mkey_seg);
	*size += sizeof(struct mlx5_mkey_seg) / 16;
	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);

	if (umr_inline) {
		memcpy_send_wqe(&qp->sq, cur_edge, seg, size, mr->descs,
				mr_list_size);
		*size = ALIGN(*size, MLX5_SEND_WQE_BB >> 4);
	} else {
		set_reg_data_seg(*seg, mr, pd);
		*seg += sizeof(struct mlx5_wqe_data_seg);
		*size += (sizeof(struct mlx5_wqe_data_seg) / 16);
	}
	return 0;
}
static void set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size,
			void **cur_edge)
{
	set_linv_umr_seg(*seg);
	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
	set_linv_mkey_seg(*seg);
	*seg += sizeof(struct mlx5_mkey_seg);
	*size += sizeof(struct mlx5_mkey_seg) / 16;
	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
}

static void dump_wqe(struct mlx5_ib_qp *qp, u32 idx, int size_16)
{
	__be32 *p = NULL;
	int i, j;

	pr_debug("dump WQE index %u:\n", idx);
	for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
		if ((i & 0xf) == 0) {
			p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, idx);
			pr_debug("WQBB at %p:\n", (void *)p);
			j = 0;
			idx = (idx + 1) & (qp->sq.wqe_cnt - 1);
		}
		pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
			 be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
			 be32_to_cpu(p[j + 3]));
	}
}
static int __begin_wqe(struct mlx5_ib_qp *qp, void **seg,
		       struct mlx5_wqe_ctrl_seg **ctrl,
		       const struct ib_send_wr *wr, unsigned int *idx,
		       int *size, void **cur_edge, int nreq,
		       bool send_signaled, bool solicited)
{
	if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))
		return -ENOMEM;

	*idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
	*seg = mlx5_frag_buf_get_wqe(&qp->sq.fbc, *idx);
	*ctrl = *seg;
	*(uint32_t *)(*seg + 8) = 0;
	(*ctrl)->imm = send_ieth(wr);
	(*ctrl)->fm_ce_se = qp->sq_signal_bits |
		(send_signaled ? MLX5_WQE_CTRL_CQ_UPDATE : 0) |
		(solicited ? MLX5_WQE_CTRL_SOLICITED : 0);

	*seg += sizeof(**ctrl);
	*size = sizeof(**ctrl) / 16;
	*cur_edge = qp->sq.cur_edge;

	return 0;
}

static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
		     struct mlx5_wqe_ctrl_seg **ctrl,
		     const struct ib_send_wr *wr, unsigned *idx,
		     int *size, void **cur_edge, int nreq)
{
	return __begin_wqe(qp, seg, ctrl, wr, idx, size, cur_edge, nreq,
			   wr->send_flags & IB_SEND_SIGNALED,
			   wr->send_flags & IB_SEND_SOLICITED);
}

static void finish_wqe(struct mlx5_ib_qp *qp,
		       struct mlx5_wqe_ctrl_seg *ctrl,
		       void *seg, u8 size, void *cur_edge,
		       unsigned int idx, u64 wr_id, int nreq, u8 fence,
		       u32 mlx5_opcode)
{
	u8 opmod = 0;

	ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
					     mlx5_opcode | ((u32)opmod << 24));
	ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8));
	ctrl->fm_ce_se |= fence;
	if (unlikely(qp->flags_en & MLX5_QP_FLAG_SIGNATURE))
		ctrl->signature = wq_sig(ctrl);

	qp->sq.wrid[idx] = wr_id;
	qp->sq.w_list[idx].opcode = mlx5_opcode;
	qp->sq.wqe_head[idx] = qp->sq.head + nreq;
	qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
	qp->sq.w_list[idx].next = qp->sq.cur_post;

	/* We save the edge which was possibly updated during the WQE
	 * construction, into SQ's cache.
	 */
	seg = PTR_ALIGN(seg, MLX5_SEND_WQE_BB);
	qp->sq.cur_edge = (unlikely(seg == cur_edge)) ?
			  get_sq_edge(&qp->sq, qp->sq.cur_post &
				      (qp->sq.wqe_cnt - 1)) :
			  cur_edge;
}
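/*
 * Note: finish_wqe() stamps the control segment (opcode, WQE index, DS count
 * and fence bits), records the bookkeeping needed for completion handling,
 * advances cur_post in units of WQE basic blocks, and caches the
 * possibly-updated fragment edge for the next post.
 */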
5170 static int _mlx5_ib_post_send(struct ib_qp
*ibqp
, const struct ib_send_wr
*wr
,
5171 const struct ib_send_wr
**bad_wr
, bool drain
)
5173 struct mlx5_wqe_ctrl_seg
*ctrl
= NULL
; /* compiler warning */
5174 struct mlx5_ib_dev
*dev
= to_mdev(ibqp
->device
);
5175 struct mlx5_core_dev
*mdev
= dev
->mdev
;
5176 struct ib_reg_wr reg_pi_wr
;
5177 struct mlx5_ib_qp
*qp
;
5178 struct mlx5_ib_mr
*mr
;
5179 struct mlx5_ib_mr
*pi_mr
;
5180 struct mlx5_ib_mr pa_pi_mr
;
5181 struct ib_sig_attrs
*sig_attrs
;
5182 struct mlx5_wqe_xrc_seg
*xrc
;
5185 int uninitialized_var(size
);
5186 unsigned long flags
;
5196 if (unlikely(mdev
->state
== MLX5_DEVICE_STATE_INTERNAL_ERROR
&&
5202 if (unlikely(ibqp
->qp_type
== IB_QPT_GSI
))
5203 return mlx5_ib_gsi_post_send(ibqp
, wr
, bad_wr
);
5208 spin_lock_irqsave(&qp
->sq
.lock
, flags
);
5210 for (nreq
= 0; wr
; nreq
++, wr
= wr
->next
) {
5211 if (unlikely(wr
->opcode
>= ARRAY_SIZE(mlx5_ib_opcode
))) {
5212 mlx5_ib_warn(dev
, "\n");
5218 num_sge
= wr
->num_sge
;
5219 if (unlikely(num_sge
> qp
->sq
.max_gs
)) {
5220 mlx5_ib_warn(dev
, "\n");
5226 err
= begin_wqe(qp
, &seg
, &ctrl
, wr
, &idx
, &size
, &cur_edge
,
5229 mlx5_ib_warn(dev
, "\n");
5235 if (wr
->opcode
== IB_WR_REG_MR
||
5236 wr
->opcode
== IB_WR_REG_MR_INTEGRITY
) {
5237 fence
= dev
->umr_fence
;
5238 next_fence
= MLX5_FENCE_MODE_INITIATOR_SMALL
;
5240 if (wr
->send_flags
& IB_SEND_FENCE
) {
5242 fence
= MLX5_FENCE_MODE_SMALL_AND_FENCE
;
5244 fence
= MLX5_FENCE_MODE_FENCE
;
5246 fence
= qp
->next_fence
;
5250 switch (ibqp
->qp_type
) {
5251 case IB_QPT_XRC_INI
:
5253 seg
+= sizeof(*xrc
);
5254 size
+= sizeof(*xrc
) / 16;
5257 switch (wr
->opcode
) {
5258 case IB_WR_RDMA_READ
:
5259 case IB_WR_RDMA_WRITE
:
5260 case IB_WR_RDMA_WRITE_WITH_IMM
:
5261 set_raddr_seg(seg
, rdma_wr(wr
)->remote_addr
,
5263 seg
+= sizeof(struct mlx5_wqe_raddr_seg
);
5264 size
+= sizeof(struct mlx5_wqe_raddr_seg
) / 16;
5267 case IB_WR_ATOMIC_CMP_AND_SWP
:
5268 case IB_WR_ATOMIC_FETCH_AND_ADD
:
5269 case IB_WR_MASKED_ATOMIC_CMP_AND_SWP
:
5270 mlx5_ib_warn(dev
, "Atomic operations are not supported yet\n");
5275 case IB_WR_LOCAL_INV
:
5276 qp
->sq
.wr_data
[idx
] = IB_WR_LOCAL_INV
;
5277 ctrl
->imm
= cpu_to_be32(wr
->ex
.invalidate_rkey
);
5278 set_linv_wr(qp
, &seg
, &size
, &cur_edge
);
5283 qp
->sq
.wr_data
[idx
] = IB_WR_REG_MR
;
5284 ctrl
->imm
= cpu_to_be32(reg_wr(wr
)->key
);
5285 err
= set_reg_wr(qp
, reg_wr(wr
), &seg
, &size
,
5294 case IB_WR_REG_MR_INTEGRITY
:
5295 qp
->sq
.wr_data
[idx
] = IB_WR_REG_MR_INTEGRITY
;
5297 mr
= to_mmr(reg_wr(wr
)->mr
);
5301 memset(®_pi_wr
, 0,
5302 sizeof(struct ib_reg_wr
));
5304 reg_pi_wr
.mr
= &pi_mr
->ibmr
;
5305 reg_pi_wr
.access
= reg_wr(wr
)->access
;
5306 reg_pi_wr
.key
= pi_mr
->ibmr
.rkey
;
5308 ctrl
->imm
= cpu_to_be32(reg_pi_wr
.key
);
5309 /* UMR for data + prot registration */
5310 err
= set_reg_wr(qp
, ®_pi_wr
, &seg
,
5317 finish_wqe(qp
, ctrl
, seg
, size
,
5318 cur_edge
, idx
, wr
->wr_id
,
5322 err
= begin_wqe(qp
, &seg
, &ctrl
, wr
,
5323 &idx
, &size
, &cur_edge
,
5326 mlx5_ib_warn(dev
, "\n");
5332 memset(&pa_pi_mr
, 0,
5333 sizeof(struct mlx5_ib_mr
));
5334 /* No UMR, use local_dma_lkey */
5335 pa_pi_mr
.ibmr
.lkey
=
5336 mr
->ibmr
.pd
->local_dma_lkey
;
5338 pa_pi_mr
.ndescs
= mr
->ndescs
;
5339 pa_pi_mr
.data_length
= mr
->data_length
;
5340 pa_pi_mr
.data_iova
= mr
->data_iova
;
5341 if (mr
->meta_ndescs
) {
5342 pa_pi_mr
.meta_ndescs
=
5344 pa_pi_mr
.meta_length
=
5346 pa_pi_mr
.pi_iova
= mr
->pi_iova
;
5349 pa_pi_mr
.ibmr
.length
= mr
->ibmr
.length
;
5350 mr
->pi_mr
= &pa_pi_mr
;
5352 ctrl
->imm
= cpu_to_be32(mr
->ibmr
.rkey
);
5353 /* UMR for sig MR */
5354 err
= set_pi_umr_wr(wr
, qp
, &seg
, &size
,
5357 mlx5_ib_warn(dev
, "\n");
5361 finish_wqe(qp
, ctrl
, seg
, size
, cur_edge
, idx
,
5362 wr
->wr_id
, nreq
, fence
,
5366 * SET_PSV WQEs are not signaled and solicited
5369 sig_attrs
= mr
->ibmr
.sig_attrs
;
5370 err
= __begin_wqe(qp
, &seg
, &ctrl
, wr
, &idx
,
5371 &size
, &cur_edge
, nreq
, false,
5374 mlx5_ib_warn(dev
, "\n");
5379 err
= set_psv_wr(&sig_attrs
->mem
,
5380 mr
->sig
->psv_memory
.psv_idx
,
5383 mlx5_ib_warn(dev
, "\n");
5387 finish_wqe(qp
, ctrl
, seg
, size
, cur_edge
, idx
,
5388 wr
->wr_id
, nreq
, next_fence
,
5389 MLX5_OPCODE_SET_PSV
);
5391 err
= __begin_wqe(qp
, &seg
, &ctrl
, wr
, &idx
,
5392 &size
, &cur_edge
, nreq
, false,
5395 mlx5_ib_warn(dev
, "\n");
5400 err
= set_psv_wr(&sig_attrs
->wire
,
5401 mr
->sig
->psv_wire
.psv_idx
,
5404 mlx5_ib_warn(dev
, "\n");
5408 finish_wqe(qp
, ctrl
, seg
, size
, cur_edge
, idx
,
5409 wr
->wr_id
, nreq
, next_fence
,
5410 MLX5_OPCODE_SET_PSV
);
5413 MLX5_FENCE_MODE_INITIATOR_SMALL
;
5423 switch (wr
->opcode
) {
5424 case IB_WR_RDMA_WRITE
:
5425 case IB_WR_RDMA_WRITE_WITH_IMM
:
5426 set_raddr_seg(seg
, rdma_wr(wr
)->remote_addr
,
5428 seg
+= sizeof(struct mlx5_wqe_raddr_seg
);
5429 size
+= sizeof(struct mlx5_wqe_raddr_seg
) / 16;
5438 if (unlikely(!mdev
->port_caps
[qp
->port
- 1].has_smi
)) {
5439 mlx5_ib_warn(dev
, "Send SMP MADs is not allowed\n");
5445 case MLX5_IB_QPT_HW_GSI
:
5446 set_datagram_seg(seg
, wr
);
5447 seg
+= sizeof(struct mlx5_wqe_datagram_seg
);
5448 size
+= sizeof(struct mlx5_wqe_datagram_seg
) / 16;
5449 handle_post_send_edge(&qp
->sq
, &seg
, size
, &cur_edge
);
5453 set_datagram_seg(seg
, wr
);
5454 seg
+= sizeof(struct mlx5_wqe_datagram_seg
);
5455 size
+= sizeof(struct mlx5_wqe_datagram_seg
) / 16;
5456 handle_post_send_edge(&qp
->sq
, &seg
, size
, &cur_edge
);
5458 /* handle qp that supports ud offload */
5459 if (qp
->flags
& IB_QP_CREATE_IPOIB_UD_LSO
) {
5460 struct mlx5_wqe_eth_pad
*pad
;
5463 memset(pad
, 0, sizeof(struct mlx5_wqe_eth_pad
));
5464 seg
+= sizeof(struct mlx5_wqe_eth_pad
);
5465 size
+= sizeof(struct mlx5_wqe_eth_pad
) / 16;
5466 set_eth_seg(wr
, qp
, &seg
, &size
, &cur_edge
);
5467 handle_post_send_edge(&qp
->sq
, &seg
, size
,
5471 case MLX5_IB_QPT_REG_UMR
:
5472 if (wr
->opcode
!= MLX5_IB_WR_UMR
) {
5474 mlx5_ib_warn(dev
, "bad opcode\n");
5477 qp
->sq
.wr_data
[idx
] = MLX5_IB_WR_UMR
;
5478 ctrl
->imm
= cpu_to_be32(umr_wr(wr
)->mkey
);
5479 err
= set_reg_umr_segment(dev
, seg
, wr
, !!(MLX5_CAP_GEN(mdev
, atomic
)));
5482 seg
+= sizeof(struct mlx5_wqe_umr_ctrl_seg
);
5483 size
+= sizeof(struct mlx5_wqe_umr_ctrl_seg
) / 16;
5484 handle_post_send_edge(&qp
->sq
, &seg
, size
, &cur_edge
);
5485 set_reg_mkey_segment(seg
, wr
);
5486 seg
+= sizeof(struct mlx5_mkey_seg
);
5487 size
+= sizeof(struct mlx5_mkey_seg
) / 16;
5488 handle_post_send_edge(&qp
->sq
, &seg
, size
, &cur_edge
);
5495 if (wr
->send_flags
& IB_SEND_INLINE
&& num_sge
) {
5496 err
= set_data_inl_seg(qp
, wr
, &seg
, &size
, &cur_edge
);
5497 if (unlikely(err
)) {
5498 mlx5_ib_warn(dev
, "\n");
5503 for (i
= 0; i
< num_sge
; i
++) {
5504 handle_post_send_edge(&qp
->sq
, &seg
, size
,
5506 if (likely(wr
->sg_list
[i
].length
)) {
5508 ((struct mlx5_wqe_data_seg
*)seg
,
5510 size
+= sizeof(struct mlx5_wqe_data_seg
) / 16;
5511 seg
+= sizeof(struct mlx5_wqe_data_seg
);
5516 qp
->next_fence
= next_fence
;
5517 finish_wqe(qp
, ctrl
, seg
, size
, cur_edge
, idx
, wr
->wr_id
, nreq
,
5518 fence
, mlx5_ib_opcode
[wr
->opcode
]);
5521 dump_wqe(qp
, idx
, size
);
5526 qp
->sq
.head
+= nreq
;
5528 /* Make sure that descriptors are written before
5529 * updating doorbell record and ringing the doorbell
5533 qp
->db
.db
[MLX5_SND_DBR
] = cpu_to_be32(qp
->sq
.cur_post
);
5535 /* Make sure doorbell record is visible to the HCA before
5536 * we hit doorbell */
5539 mlx5_write64((__be32
*)ctrl
, bf
->bfreg
->map
+ bf
->offset
);
5540 /* Make sure doorbells don't leak out of SQ spinlock
5541 * and reach the HCA out of order.
5543 bf
->offset
^= bf
->buf_size
;
5546 spin_unlock_irqrestore(&qp
->sq
.lock
, flags
);
5551 int mlx5_ib_post_send(struct ib_qp
*ibqp
, const struct ib_send_wr
*wr
,
5552 const struct ib_send_wr
**bad_wr
)
5554 return _mlx5_ib_post_send(ibqp
, wr
, bad_wr
, false);
5557 static void set_sig_seg(struct mlx5_rwqe_sig
*sig
, int size
)
5559 sig
->signature
= calc_sig(sig
, size
);
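/*
 * Note: mlx5_ib_post_send() and mlx5_ib_post_recv() are thin wrappers around
 * the _mlx5_ib_post_send()/_mlx5_ib_post_recv() workers with drain == false;
 * the drain paths reuse the same workers so they can still post while the
 * device is in internal error state.
 */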
5562 static int _mlx5_ib_post_recv(struct ib_qp
*ibqp
, const struct ib_recv_wr
*wr
,
5563 const struct ib_recv_wr
**bad_wr
, bool drain
)
5565 struct mlx5_ib_qp
*qp
= to_mqp(ibqp
);
5566 struct mlx5_wqe_data_seg
*scat
;
5567 struct mlx5_rwqe_sig
*sig
;
5568 struct mlx5_ib_dev
*dev
= to_mdev(ibqp
->device
);
5569 struct mlx5_core_dev
*mdev
= dev
->mdev
;
5570 unsigned long flags
;
5576 if (unlikely(mdev
->state
== MLX5_DEVICE_STATE_INTERNAL_ERROR
&&
5582 if (unlikely(ibqp
->qp_type
== IB_QPT_GSI
))
5583 return mlx5_ib_gsi_post_recv(ibqp
, wr
, bad_wr
);
5585 spin_lock_irqsave(&qp
->rq
.lock
, flags
);
5587 ind
= qp
->rq
.head
& (qp
->rq
.wqe_cnt
- 1);
5589 for (nreq
= 0; wr
; nreq
++, wr
= wr
->next
) {
5590 if (mlx5_wq_overflow(&qp
->rq
, nreq
, qp
->ibqp
.recv_cq
)) {
5596 if (unlikely(wr
->num_sge
> qp
->rq
.max_gs
)) {
5602 scat
= mlx5_frag_buf_get_wqe(&qp
->rq
.fbc
, ind
);
5603 if (qp
->flags_en
& MLX5_QP_FLAG_SIGNATURE
)
5606 for (i
= 0; i
< wr
->num_sge
; i
++)
5607 set_data_ptr_seg(scat
+ i
, wr
->sg_list
+ i
);
5609 if (i
< qp
->rq
.max_gs
) {
5610 scat
[i
].byte_count
= 0;
5611 scat
[i
].lkey
= cpu_to_be32(MLX5_INVALID_LKEY
);
5615 if (qp
->flags_en
& MLX5_QP_FLAG_SIGNATURE
) {
5616 sig
= (struct mlx5_rwqe_sig
*)scat
;
5617 set_sig_seg(sig
, (qp
->rq
.max_gs
+ 1) << 2);
5620 qp
->rq
.wrid
[ind
] = wr
->wr_id
;
5622 ind
= (ind
+ 1) & (qp
->rq
.wqe_cnt
- 1);
5627 qp
->rq
.head
+= nreq
;
5629 /* Make sure that descriptors are written before
5634 *qp
->db
.db
= cpu_to_be32(qp
->rq
.head
& 0xffff);
5637 spin_unlock_irqrestore(&qp
->rq
.lock
, flags
);
5642 int mlx5_ib_post_recv(struct ib_qp
*ibqp
, const struct ib_recv_wr
*wr
,
5643 const struct ib_recv_wr
**bad_wr
)
5645 return _mlx5_ib_post_recv(ibqp
, wr
, bad_wr
, false);
5648 static inline enum ib_qp_state
to_ib_qp_state(enum mlx5_qp_state mlx5_state
)
5650 switch (mlx5_state
) {
5651 case MLX5_QP_STATE_RST
: return IB_QPS_RESET
;
5652 case MLX5_QP_STATE_INIT
: return IB_QPS_INIT
;
5653 case MLX5_QP_STATE_RTR
: return IB_QPS_RTR
;
5654 case MLX5_QP_STATE_RTS
: return IB_QPS_RTS
;
5655 case MLX5_QP_STATE_SQ_DRAINING
:
5656 case MLX5_QP_STATE_SQD
: return IB_QPS_SQD
;
5657 case MLX5_QP_STATE_SQER
: return IB_QPS_SQE
;
5658 case MLX5_QP_STATE_ERR
: return IB_QPS_ERR
;
5663 static inline enum ib_mig_state
to_ib_mig_state(int mlx5_mig_state
)
5665 switch (mlx5_mig_state
) {
5666 case MLX5_QP_PM_ARMED
: return IB_MIG_ARMED
;
5667 case MLX5_QP_PM_REARM
: return IB_MIG_REARM
;
5668 case MLX5_QP_PM_MIGRATED
: return IB_MIG_MIGRATED
;
5673 static int to_ib_qp_access_flags(int mlx5_flags
)
5677 if (mlx5_flags
& MLX5_QP_BIT_RRE
)
5678 ib_flags
|= IB_ACCESS_REMOTE_READ
;
5679 if (mlx5_flags
& MLX5_QP_BIT_RWE
)
5680 ib_flags
|= IB_ACCESS_REMOTE_WRITE
;
5681 if (mlx5_flags
& MLX5_QP_BIT_RAE
)
5682 ib_flags
|= IB_ACCESS_REMOTE_ATOMIC
;
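/*
 * Note: the RRE/RWE/RAE bits in the QP context map directly to the IB remote
 * read/write/atomic access flags that are reported back through
 * ib_query_qp().
 */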
5687 static void to_rdma_ah_attr(struct mlx5_ib_dev
*ibdev
,
5688 struct rdma_ah_attr
*ah_attr
,
5689 struct mlx5_qp_path
*path
)
5692 memset(ah_attr
, 0, sizeof(*ah_attr
));
5694 if (!path
->port
|| path
->port
> ibdev
->num_ports
)
5697 ah_attr
->type
= rdma_ah_find_type(&ibdev
->ib_dev
, path
->port
);
5699 rdma_ah_set_port_num(ah_attr
, path
->port
);
5700 rdma_ah_set_sl(ah_attr
, path
->dci_cfi_prio_sl
& 0xf);
5702 rdma_ah_set_dlid(ah_attr
, be16_to_cpu(path
->rlid
));
5703 rdma_ah_set_path_bits(ah_attr
, path
->grh_mlid
& 0x7f);
5704 rdma_ah_set_static_rate(ah_attr
,
5705 path
->static_rate
? path
->static_rate
- 5 : 0);
5706 if (path
->grh_mlid
& (1 << 7)) {
5707 u32 tc_fl
= be32_to_cpu(path
->tclass_flowlabel
);
5709 rdma_ah_set_grh(ah_attr
, NULL
,
5713 (tc_fl
>> 20) & 0xff);
5714 rdma_ah_set_dgid_raw(ah_attr
, path
->rgid
);
5718 static int query_raw_packet_qp_sq_state(struct mlx5_ib_dev
*dev
,
5719 struct mlx5_ib_sq
*sq
,
5724 err
= mlx5_core_query_sq_state(dev
->mdev
, sq
->base
.mqp
.qpn
, sq_state
);
5727 sq
->state
= *sq_state
;
5733 static int query_raw_packet_qp_rq_state(struct mlx5_ib_dev
*dev
,
5734 struct mlx5_ib_rq
*rq
,
5742 inlen
= MLX5_ST_SZ_BYTES(query_rq_out
);
5743 out
= kvzalloc(inlen
, GFP_KERNEL
);
5747 err
= mlx5_core_query_rq(dev
->mdev
, rq
->base
.mqp
.qpn
, out
);
5751 rqc
= MLX5_ADDR_OF(query_rq_out
, out
, rq_context
);
5752 *rq_state
= MLX5_GET(rqc
, rqc
, state
);
5753 rq
->state
= *rq_state
;
5760 static int sqrq_state_to_qp_state(u8 sq_state
, u8 rq_state
,
5761 struct mlx5_ib_qp
*qp
, u8
*qp_state
)
5763 static const u8 sqrq_trans
[MLX5_RQ_NUM_STATE
][MLX5_SQ_NUM_STATE
] = {
5764 [MLX5_RQC_STATE_RST
] = {
5765 [MLX5_SQC_STATE_RST
] = IB_QPS_RESET
,
5766 [MLX5_SQC_STATE_RDY
] = MLX5_QP_STATE_BAD
,
5767 [MLX5_SQC_STATE_ERR
] = MLX5_QP_STATE_BAD
,
5768 [MLX5_SQ_STATE_NA
] = IB_QPS_RESET
,
5770 [MLX5_RQC_STATE_RDY
] = {
5771 [MLX5_SQC_STATE_RST
] = MLX5_QP_STATE_BAD
,
5772 [MLX5_SQC_STATE_RDY
] = MLX5_QP_STATE
,
5773 [MLX5_SQC_STATE_ERR
] = IB_QPS_SQE
,
5774 [MLX5_SQ_STATE_NA
] = MLX5_QP_STATE
,
5776 [MLX5_RQC_STATE_ERR
] = {
5777 [MLX5_SQC_STATE_RST
] = MLX5_QP_STATE_BAD
,
5778 [MLX5_SQC_STATE_RDY
] = MLX5_QP_STATE_BAD
,
5779 [MLX5_SQC_STATE_ERR
] = IB_QPS_ERR
,
5780 [MLX5_SQ_STATE_NA
] = IB_QPS_ERR
,
5782 [MLX5_RQ_STATE_NA
] = {
5783 [MLX5_SQC_STATE_RST
] = IB_QPS_RESET
,
5784 [MLX5_SQC_STATE_RDY
] = MLX5_QP_STATE
,
5785 [MLX5_SQC_STATE_ERR
] = MLX5_QP_STATE
,
5786 [MLX5_SQ_STATE_NA
] = MLX5_QP_STATE_BAD
,
5790 *qp_state
= sqrq_trans
[rq_state
][sq_state
];
5792 if (*qp_state
== MLX5_QP_STATE_BAD
) {
5793 WARN(1, "Buggy Raw Packet QP state, SQ 0x%x state: 0x%x, RQ 0x%x state: 0x%x",
5794 qp
->raw_packet_qp
.sq
.base
.mqp
.qpn
, sq_state
,
5795 qp
->raw_packet_qp
.rq
.base
.mqp
.qpn
, rq_state
);
5799 if (*qp_state
== MLX5_QP_STATE
)
5800 *qp_state
= qp
->state
;
5805 static int query_raw_packet_qp_state(struct mlx5_ib_dev
*dev
,
5806 struct mlx5_ib_qp
*qp
,
5807 u8
*raw_packet_qp_state
)
5809 struct mlx5_ib_raw_packet_qp
*raw_packet_qp
= &qp
->raw_packet_qp
;
5810 struct mlx5_ib_sq
*sq
= &raw_packet_qp
->sq
;
5811 struct mlx5_ib_rq
*rq
= &raw_packet_qp
->rq
;
5813 u8 sq_state
= MLX5_SQ_STATE_NA
;
5814 u8 rq_state
= MLX5_RQ_STATE_NA
;
5816 if (qp
->sq
.wqe_cnt
) {
5817 err
= query_raw_packet_qp_sq_state(dev
, sq
, &sq_state
);
5822 if (qp
->rq
.wqe_cnt
) {
5823 err
= query_raw_packet_qp_rq_state(dev
, rq
, &rq_state
);
5828 return sqrq_state_to_qp_state(sq_state
, rq_state
, qp
,
5829 raw_packet_qp_state
);
5832 static int query_qp_attr(struct mlx5_ib_dev
*dev
, struct mlx5_ib_qp
*qp
,
5833 struct ib_qp_attr
*qp_attr
)
5835 int outlen
= MLX5_ST_SZ_BYTES(query_qp_out
);
5836 struct mlx5_qp_context
*context
;
5841 outb
= kzalloc(outlen
, GFP_KERNEL
);
5845 err
= mlx5_core_qp_query(dev
, &qp
->trans_qp
.base
.mqp
, outb
, outlen
);
5849 /* FIXME: use MLX5_GET rather than mlx5_qp_context manual struct */
5850 context
= (struct mlx5_qp_context
*)MLX5_ADDR_OF(query_qp_out
, outb
, qpc
);
5852 mlx5_state
= be32_to_cpu(context
->flags
) >> 28;
5854 qp
->state
= to_ib_qp_state(mlx5_state
);
5855 qp_attr
->path_mtu
= context
->mtu_msgmax
>> 5;
5856 qp_attr
->path_mig_state
=
5857 to_ib_mig_state((be32_to_cpu(context
->flags
) >> 11) & 0x3);
5858 qp_attr
->qkey
= be32_to_cpu(context
->qkey
);
5859 qp_attr
->rq_psn
= be32_to_cpu(context
->rnr_nextrecvpsn
) & 0xffffff;
5860 qp_attr
->sq_psn
= be32_to_cpu(context
->next_send_psn
) & 0xffffff;
5861 qp_attr
->dest_qp_num
= be32_to_cpu(context
->log_pg_sz_remote_qpn
) & 0xffffff;
5862 qp_attr
->qp_access_flags
=
5863 to_ib_qp_access_flags(be32_to_cpu(context
->params2
));
5865 if (qp
->ibqp
.qp_type
== IB_QPT_RC
|| qp
->ibqp
.qp_type
== IB_QPT_UC
) {
5866 to_rdma_ah_attr(dev
, &qp_attr
->ah_attr
, &context
->pri_path
);
5867 to_rdma_ah_attr(dev
, &qp_attr
->alt_ah_attr
, &context
->alt_path
);
5868 qp_attr
->alt_pkey_index
=
5869 be16_to_cpu(context
->alt_path
.pkey_index
);
5870 qp_attr
->alt_port_num
=
5871 rdma_ah_get_port_num(&qp_attr
->alt_ah_attr
);
5874 qp_attr
->pkey_index
= be16_to_cpu(context
->pri_path
.pkey_index
);
5875 qp_attr
->port_num
= context
->pri_path
.port
;
5877 /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
5878 qp_attr
->sq_draining
= mlx5_state
== MLX5_QP_STATE_SQ_DRAINING
;
5880 qp_attr
->max_rd_atomic
= 1 << ((be32_to_cpu(context
->params1
) >> 21) & 0x7);
5882 qp_attr
->max_dest_rd_atomic
=
5883 1 << ((be32_to_cpu(context
->params2
) >> 21) & 0x7);
5884 qp_attr
->min_rnr_timer
=
5885 (be32_to_cpu(context
->rnr_nextrecvpsn
) >> 24) & 0x1f;
5886 qp_attr
->timeout
= context
->pri_path
.ackto_lt
>> 3;
5887 qp_attr
->retry_cnt
= (be32_to_cpu(context
->params1
) >> 16) & 0x7;
5888 qp_attr
->rnr_retry
= (be32_to_cpu(context
->params1
) >> 13) & 0x7;
5889 qp_attr
->alt_timeout
= context
->alt_path
.ackto_lt
>> 3;
5896 static int mlx5_ib_dct_query_qp(struct mlx5_ib_dev
*dev
, struct mlx5_ib_qp
*mqp
,
5897 struct ib_qp_attr
*qp_attr
, int qp_attr_mask
,
5898 struct ib_qp_init_attr
*qp_init_attr
)
5900 struct mlx5_core_dct
*dct
= &mqp
->dct
.mdct
;
5902 u32 access_flags
= 0;
5903 int outlen
= MLX5_ST_SZ_BYTES(query_dct_out
);
5906 int supported_mask
= IB_QP_STATE
|
5907 IB_QP_ACCESS_FLAGS
|
5909 IB_QP_MIN_RNR_TIMER
|
5914 if (qp_attr_mask
& ~supported_mask
)
5916 if (mqp
->state
!= IB_QPS_RTR
)
5919 out
= kzalloc(outlen
, GFP_KERNEL
);
5923 err
= mlx5_core_dct_query(dev
, dct
, out
, outlen
);
5927 dctc
= MLX5_ADDR_OF(query_dct_out
, out
, dct_context_entry
);
5929 if (qp_attr_mask
& IB_QP_STATE
)
5930 qp_attr
->qp_state
= IB_QPS_RTR
;
5932 if (qp_attr_mask
& IB_QP_ACCESS_FLAGS
) {
5933 if (MLX5_GET(dctc
, dctc
, rre
))
5934 access_flags
|= IB_ACCESS_REMOTE_READ
;
5935 if (MLX5_GET(dctc
, dctc
, rwe
))
5936 access_flags
|= IB_ACCESS_REMOTE_WRITE
;
5937 if (MLX5_GET(dctc
, dctc
, rae
))
5938 access_flags
|= IB_ACCESS_REMOTE_ATOMIC
;
5939 qp_attr
->qp_access_flags
= access_flags
;
5942 if (qp_attr_mask
& IB_QP_PORT
)
5943 qp_attr
->port_num
= MLX5_GET(dctc
, dctc
, port
);
5944 if (qp_attr_mask
& IB_QP_MIN_RNR_TIMER
)
5945 qp_attr
->min_rnr_timer
= MLX5_GET(dctc
, dctc
, min_rnr_nak
);
5946 if (qp_attr_mask
& IB_QP_AV
) {
5947 qp_attr
->ah_attr
.grh
.traffic_class
= MLX5_GET(dctc
, dctc
, tclass
);
5948 qp_attr
->ah_attr
.grh
.flow_label
= MLX5_GET(dctc
, dctc
, flow_label
);
5949 qp_attr
->ah_attr
.grh
.sgid_index
= MLX5_GET(dctc
, dctc
, my_addr_index
);
5950 qp_attr
->ah_attr
.grh
.hop_limit
= MLX5_GET(dctc
, dctc
, hop_limit
);
5952 if (qp_attr_mask
& IB_QP_PATH_MTU
)
5953 qp_attr
->path_mtu
= MLX5_GET(dctc
, dctc
, mtu
);
5954 if (qp_attr_mask
& IB_QP_PKEY_INDEX
)
5955 qp_attr
->pkey_index
= MLX5_GET(dctc
, dctc
, pkey_index
);
5961 int mlx5_ib_query_qp(struct ib_qp
*ibqp
, struct ib_qp_attr
*qp_attr
,
5962 int qp_attr_mask
, struct ib_qp_init_attr
*qp_init_attr
)
5964 struct mlx5_ib_dev
*dev
= to_mdev(ibqp
->device
);
5965 struct mlx5_ib_qp
*qp
= to_mqp(ibqp
);
5967 u8 raw_packet_qp_state
;
5969 if (ibqp
->rwq_ind_tbl
)
5972 if (unlikely(ibqp
->qp_type
== IB_QPT_GSI
))
5973 return mlx5_ib_gsi_query_qp(ibqp
, qp_attr
, qp_attr_mask
,
5976 /* Not all of output fields are applicable, make sure to zero them */
5977 memset(qp_init_attr
, 0, sizeof(*qp_init_attr
));
5978 memset(qp_attr
, 0, sizeof(*qp_attr
));
5980 if (unlikely(qp
->type
== MLX5_IB_QPT_DCT
))
5981 return mlx5_ib_dct_query_qp(dev
, qp
, qp_attr
,
5982 qp_attr_mask
, qp_init_attr
);
5984 mutex_lock(&qp
->mutex
);
5986 if (qp
->ibqp
.qp_type
== IB_QPT_RAW_PACKET
||
5987 qp
->flags
& IB_QP_CREATE_SOURCE_QPN
) {
5988 err
= query_raw_packet_qp_state(dev
, qp
, &raw_packet_qp_state
);
5991 qp
->state
= raw_packet_qp_state
;
5992 qp_attr
->port_num
= 1;
5994 err
= query_qp_attr(dev
, qp
, qp_attr
);
5999 qp_attr
->qp_state
= qp
->state
;
6000 qp_attr
->cur_qp_state
= qp_attr
->qp_state
;
6001 qp_attr
->cap
.max_recv_wr
= qp
->rq
.wqe_cnt
;
6002 qp_attr
->cap
.max_recv_sge
= qp
->rq
.max_gs
;
6004 if (!ibqp
->uobject
) {
6005 qp_attr
->cap
.max_send_wr
= qp
->sq
.max_post
;
6006 qp_attr
->cap
.max_send_sge
= qp
->sq
.max_gs
;
6007 qp_init_attr
->qp_context
= ibqp
->qp_context
;
6009 qp_attr
->cap
.max_send_wr
= 0;
6010 qp_attr
->cap
.max_send_sge
= 0;
6013 qp_init_attr
->qp_type
= ibqp
->qp_type
;
6014 qp_init_attr
->recv_cq
= ibqp
->recv_cq
;
6015 qp_init_attr
->send_cq
= ibqp
->send_cq
;
6016 qp_init_attr
->srq
= ibqp
->srq
;
6017 qp_attr
->cap
.max_inline_data
= qp
->max_inline_data
;
6019 qp_init_attr
->cap
= qp_attr
->cap
;
6021 qp_init_attr
->create_flags
= qp
->flags
;
6023 qp_init_attr
->sq_sig_type
= qp
->sq_signal_bits
& MLX5_WQE_CTRL_CQ_UPDATE
?
6024 IB_SIGNAL_ALL_WR
: IB_SIGNAL_REQ_WR
;
6027 mutex_unlock(&qp
->mutex
);
6031 struct ib_xrcd
*mlx5_ib_alloc_xrcd(struct ib_device
*ibdev
,
6032 struct ib_udata
*udata
)
6034 struct mlx5_ib_dev
*dev
= to_mdev(ibdev
);
6035 struct mlx5_ib_xrcd
*xrcd
;
6038 if (!MLX5_CAP_GEN(dev
->mdev
, xrc
))
6039 return ERR_PTR(-ENOSYS
);
6041 xrcd
= kmalloc(sizeof(*xrcd
), GFP_KERNEL
);
6043 return ERR_PTR(-ENOMEM
);
6045 err
= mlx5_cmd_xrcd_alloc(dev
->mdev
, &xrcd
->xrcdn
, 0);
6048 return ERR_PTR(-ENOMEM
);
6051 return &xrcd
->ibxrcd
;
6054 int mlx5_ib_dealloc_xrcd(struct ib_xrcd
*xrcd
, struct ib_udata
*udata
)
6056 struct mlx5_ib_dev
*dev
= to_mdev(xrcd
->device
);
6057 u32 xrcdn
= to_mxrcd(xrcd
)->xrcdn
;
6060 err
= mlx5_cmd_xrcd_dealloc(dev
->mdev
, xrcdn
, 0);
6062 mlx5_ib_warn(dev
, "failed to dealloc xrcdn 0x%x\n", xrcdn
);
6068 static void mlx5_ib_wq_event(struct mlx5_core_qp
*core_qp
, int type
)
6070 struct mlx5_ib_rwq
*rwq
= to_mibrwq(core_qp
);
6071 struct mlx5_ib_dev
*dev
= to_mdev(rwq
->ibwq
.device
);
6072 struct ib_event event
;
6074 if (rwq
->ibwq
.event_handler
) {
6075 event
.device
= rwq
->ibwq
.device
;
6076 event
.element
.wq
= &rwq
->ibwq
;
6078 case MLX5_EVENT_TYPE_WQ_CATAS_ERROR
:
6079 event
.event
= IB_EVENT_WQ_FATAL
;
6082 mlx5_ib_warn(dev
, "Unexpected event type %d on WQ %06x\n", type
, core_qp
->qpn
);
6086 rwq
->ibwq
.event_handler(&event
, rwq
->ibwq
.wq_context
);
6090 static int set_delay_drop(struct mlx5_ib_dev
*dev
)
6094 mutex_lock(&dev
->delay_drop
.lock
);
6095 if (dev
->delay_drop
.activate
)
6098 err
= mlx5_core_set_delay_drop(dev
, dev
->delay_drop
.timeout
);
6102 dev
->delay_drop
.activate
= true;
6104 mutex_unlock(&dev
->delay_drop
.lock
);
6107 atomic_inc(&dev
->delay_drop
.rqs_cnt
);
6111 static int create_rq(struct mlx5_ib_rwq
*rwq
, struct ib_pd
*pd
,
6112 struct ib_wq_init_attr
*init_attr
)
6114 struct mlx5_ib_dev
*dev
;
6115 int has_net_offloads
;
6123 dev
= to_mdev(pd
->device
);
6125 inlen
= MLX5_ST_SZ_BYTES(create_rq_in
) + sizeof(u64
) * rwq
->rq_num_pas
;
6126 in
= kvzalloc(inlen
, GFP_KERNEL
);
6130 MLX5_SET(create_rq_in
, in
, uid
, to_mpd(pd
)->uid
);
6131 rqc
= MLX5_ADDR_OF(create_rq_in
, in
, ctx
);
6132 MLX5_SET(rqc
, rqc
, mem_rq_type
,
6133 MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE
);
6134 MLX5_SET(rqc
, rqc
, user_index
, rwq
->user_index
);
6135 MLX5_SET(rqc
, rqc
, cqn
, to_mcq(init_attr
->cq
)->mcq
.cqn
);
6136 MLX5_SET(rqc
, rqc
, state
, MLX5_RQC_STATE_RST
);
6137 MLX5_SET(rqc
, rqc
, flush_in_error_en
, 1);
6138 wq
= MLX5_ADDR_OF(rqc
, rqc
, wq
);
6139 MLX5_SET(wq
, wq
, wq_type
,
6140 rwq
->create_flags
& MLX5_IB_WQ_FLAGS_STRIDING_RQ
?
6141 MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ
: MLX5_WQ_TYPE_CYCLIC
);
6142 if (init_attr
->create_flags
& IB_WQ_FLAGS_PCI_WRITE_END_PADDING
) {
6143 if (!MLX5_CAP_GEN(dev
->mdev
, end_pad
)) {
6144 mlx5_ib_dbg(dev
, "Scatter end padding is not supported\n");
6148 MLX5_SET(wq
, wq
, end_padding_mode
, MLX5_WQ_END_PAD_MODE_ALIGN
);
6151 MLX5_SET(wq
, wq
, log_wq_stride
, rwq
->log_rq_stride
);
6152 if (rwq
->create_flags
& MLX5_IB_WQ_FLAGS_STRIDING_RQ
) {
6154 * In Firmware number of strides in each WQE is:
6155 * "512 * 2^single_wqe_log_num_of_strides"
6156 * Values 3 to 8 are accepted as 10 to 15, 9 to 18 are
6157 * accepted as 0 to 9
6159 static const u8 fw_map
[] = { 10, 11, 12, 13, 14, 15, 0, 1,
6160 2, 3, 4, 5, 6, 7, 8, 9 };
6161 MLX5_SET(wq
, wq
, two_byte_shift_en
, rwq
->two_byte_shift_en
);
6162 MLX5_SET(wq
, wq
, log_wqe_stride_size
,
6163 rwq
->single_stride_log_num_of_bytes
-
6164 MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES
);
6165 MLX5_SET(wq
, wq
, log_wqe_num_of_strides
,
6166 fw_map
[rwq
->log_num_strides
-
6167 MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES
]);
6169 MLX5_SET(wq
, wq
, log_wq_sz
, rwq
->log_rq_size
);
6170 MLX5_SET(wq
, wq
, pd
, to_mpd(pd
)->pdn
);
6171 MLX5_SET(wq
, wq
, page_offset
, rwq
->rq_page_offset
);
6172 MLX5_SET(wq
, wq
, log_wq_pg_sz
, rwq
->log_page_size
);
6173 MLX5_SET(wq
, wq
, wq_signature
, rwq
->wq_sig
);
6174 MLX5_SET64(wq
, wq
, dbr_addr
, rwq
->db
.dma
);
6175 has_net_offloads
= MLX5_CAP_GEN(dev
->mdev
, eth_net_offloads
);
6176 if (init_attr
->create_flags
& IB_WQ_FLAGS_CVLAN_STRIPPING
) {
6177 if (!(has_net_offloads
&& MLX5_CAP_ETH(dev
->mdev
, vlan_cap
))) {
6178 mlx5_ib_dbg(dev
, "VLAN offloads are not supported\n");
6183 MLX5_SET(rqc
, rqc
, vsd
, 1);
6185 if (init_attr
->create_flags
& IB_WQ_FLAGS_SCATTER_FCS
) {
6186 if (!(has_net_offloads
&& MLX5_CAP_ETH(dev
->mdev
, scatter_fcs
))) {
6187 mlx5_ib_dbg(dev
, "Scatter FCS is not supported\n");
6191 MLX5_SET(rqc
, rqc
, scatter_fcs
, 1);
6193 if (init_attr
->create_flags
& IB_WQ_FLAGS_DELAY_DROP
) {
6194 if (!(dev
->ib_dev
.attrs
.raw_packet_caps
&
6195 IB_RAW_PACKET_CAP_DELAY_DROP
)) {
6196 mlx5_ib_dbg(dev
, "Delay drop is not supported\n");
6200 MLX5_SET(rqc
, rqc
, delay_drop_en
, 1);
6202 rq_pas0
= (__be64
*)MLX5_ADDR_OF(wq
, wq
, pas
);
6203 mlx5_ib_populate_pas(dev
, rwq
->umem
, rwq
->page_shift
, rq_pas0
, 0);
6204 err
= mlx5_core_create_rq_tracked(dev
, in
, inlen
, &rwq
->core_qp
);
6205 if (!err
&& init_attr
->create_flags
& IB_WQ_FLAGS_DELAY_DROP
) {
6206 err
= set_delay_drop(dev
);
6208 mlx5_ib_warn(dev
, "Failed to enable delay drop err=%d\n",
6210 mlx5_core_destroy_rq_tracked(dev
, &rwq
->core_qp
);
6212 rwq
->create_flags
|= MLX5_IB_WQ_FLAGS_DELAY_DROP
;
6220 static int set_user_rq_size(struct mlx5_ib_dev
*dev
,
6221 struct ib_wq_init_attr
*wq_init_attr
,
6222 struct mlx5_ib_create_wq
*ucmd
,
6223 struct mlx5_ib_rwq
*rwq
)
6225 /* Sanity check RQ size before proceeding */
6226 if (wq_init_attr
->max_wr
> (1 << MLX5_CAP_GEN(dev
->mdev
, log_max_wq_sz
)))
6229 if (!ucmd
->rq_wqe_count
)
6232 rwq
->wqe_count
= ucmd
->rq_wqe_count
;
6233 rwq
->wqe_shift
= ucmd
->rq_wqe_shift
;
6234 if (check_shl_overflow(rwq
->wqe_count
, rwq
->wqe_shift
, &rwq
->buf_size
))
6237 rwq
->log_rq_stride
= rwq
->wqe_shift
;
6238 rwq
->log_rq_size
= ilog2(rwq
->wqe_count
);
6242 static bool log_of_strides_valid(struct mlx5_ib_dev
*dev
, u32 log_num_strides
)
6244 if ((log_num_strides
> MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES
) ||
6245 (log_num_strides
< MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES
))
6248 if (!MLX5_CAP_GEN(dev
->mdev
, ext_stride_num_range
) &&
6249 (log_num_strides
< MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES
))
6255 static int prepare_user_rq(struct ib_pd
*pd
,
6256 struct ib_wq_init_attr
*init_attr
,
6257 struct ib_udata
*udata
,
6258 struct mlx5_ib_rwq
*rwq
)
6260 struct mlx5_ib_dev
*dev
= to_mdev(pd
->device
);
6261 struct mlx5_ib_create_wq ucmd
= {};
6263 size_t required_cmd_sz
;
6265 required_cmd_sz
= offsetof(typeof(ucmd
), single_stride_log_num_of_bytes
)
6266 + sizeof(ucmd
.single_stride_log_num_of_bytes
);
6267 if (udata
->inlen
< required_cmd_sz
) {
6268 mlx5_ib_dbg(dev
, "invalid inlen\n");
6272 if (udata
->inlen
> sizeof(ucmd
) &&
6273 !ib_is_udata_cleared(udata
, sizeof(ucmd
),
6274 udata
->inlen
- sizeof(ucmd
))) {
6275 mlx5_ib_dbg(dev
, "inlen is not supported\n");
6279 if (ib_copy_from_udata(&ucmd
, udata
, min(sizeof(ucmd
), udata
->inlen
))) {
6280 mlx5_ib_dbg(dev
, "copy failed\n");
6284 if (ucmd
.comp_mask
& (~MLX5_IB_CREATE_WQ_STRIDING_RQ
)) {
6285 mlx5_ib_dbg(dev
, "invalid comp mask\n");
6287 } else if (ucmd
.comp_mask
& MLX5_IB_CREATE_WQ_STRIDING_RQ
) {
6288 if (!MLX5_CAP_GEN(dev
->mdev
, striding_rq
)) {
6289 mlx5_ib_dbg(dev
, "Striding RQ is not supported\n");
6292 if ((ucmd
.single_stride_log_num_of_bytes
<
6293 MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES
) ||
6294 (ucmd
.single_stride_log_num_of_bytes
>
6295 MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES
)) {
6296 mlx5_ib_dbg(dev
, "Invalid log stride size (%u. Range is %u - %u)\n",
6297 ucmd
.single_stride_log_num_of_bytes
,
6298 MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES
,
6299 MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES
);
6302 if (!log_of_strides_valid(dev
,
6303 ucmd
.single_wqe_log_num_of_strides
)) {
6306 "Invalid log num strides (%u. Range is %u - %u)\n",
6307 ucmd
.single_wqe_log_num_of_strides
,
6308 MLX5_CAP_GEN(dev
->mdev
, ext_stride_num_range
) ?
6309 MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES
:
6310 MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES
,
6311 MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES
);
6314 rwq
->single_stride_log_num_of_bytes
=
6315 ucmd
.single_stride_log_num_of_bytes
;
6316 rwq
->log_num_strides
= ucmd
.single_wqe_log_num_of_strides
;
6317 rwq
->two_byte_shift_en
= !!ucmd
.two_byte_shift_en
;
6318 rwq
->create_flags
|= MLX5_IB_WQ_FLAGS_STRIDING_RQ
;
6321 err
= set_user_rq_size(dev
, init_attr
, &ucmd
, rwq
);
6323 mlx5_ib_dbg(dev
, "err %d\n", err
);
6327 err
= create_user_rq(dev
, pd
, udata
, rwq
, &ucmd
);
6329 mlx5_ib_dbg(dev
, "err %d\n", err
);
6333 rwq
->user_index
= ucmd
.user_index
;
6337 struct ib_wq
*mlx5_ib_create_wq(struct ib_pd
*pd
,
6338 struct ib_wq_init_attr
*init_attr
,
6339 struct ib_udata
*udata
)
6341 struct mlx5_ib_dev
*dev
;
6342 struct mlx5_ib_rwq
*rwq
;
6343 struct mlx5_ib_create_wq_resp resp
= {};
6344 size_t min_resp_len
;
6348 return ERR_PTR(-ENOSYS
);
6350 min_resp_len
= offsetof(typeof(resp
), reserved
) + sizeof(resp
.reserved
);
6351 if (udata
->outlen
&& udata
->outlen
< min_resp_len
)
6352 return ERR_PTR(-EINVAL
);
6354 if (!capable(CAP_SYS_RAWIO
) &&
6355 init_attr
->create_flags
& IB_WQ_FLAGS_DELAY_DROP
)
6356 return ERR_PTR(-EPERM
);
6358 dev
= to_mdev(pd
->device
);
6359 switch (init_attr
->wq_type
) {
6361 rwq
= kzalloc(sizeof(*rwq
), GFP_KERNEL
);
6363 return ERR_PTR(-ENOMEM
);
6364 err
= prepare_user_rq(pd
, init_attr
, udata
, rwq
);
6367 err
= create_rq(rwq
, pd
, init_attr
);
6372 mlx5_ib_dbg(dev
, "unsupported wq type %d\n",
6373 init_attr
->wq_type
);
6374 return ERR_PTR(-EINVAL
);
6377 rwq
->ibwq
.wq_num
= rwq
->core_qp
.qpn
;
6378 rwq
->ibwq
.state
= IB_WQS_RESET
;
6379 if (udata
->outlen
) {
6380 resp
.response_length
= offsetof(typeof(resp
), response_length
) +
6381 sizeof(resp
.response_length
);
6382 err
= ib_copy_to_udata(udata
, &resp
, resp
.response_length
);
6387 rwq
->core_qp
.event
= mlx5_ib_wq_event
;
6388 rwq
->ibwq
.event_handler
= init_attr
->event_handler
;
6392 mlx5_core_destroy_rq_tracked(dev
, &rwq
->core_qp
);
6394 destroy_user_rq(dev
, pd
, rwq
, udata
);
6397 return ERR_PTR(err
);
6400 void mlx5_ib_destroy_wq(struct ib_wq
*wq
, struct ib_udata
*udata
)
6402 struct mlx5_ib_dev
*dev
= to_mdev(wq
->device
);
6403 struct mlx5_ib_rwq
*rwq
= to_mrwq(wq
);
6405 mlx5_core_destroy_rq_tracked(dev
, &rwq
->core_qp
);
6406 destroy_user_rq(dev
, wq
->pd
, rwq
, udata
);
6410 struct ib_rwq_ind_table
*mlx5_ib_create_rwq_ind_table(struct ib_device
*device
,
6411 struct ib_rwq_ind_table_init_attr
*init_attr
,
6412 struct ib_udata
*udata
)
6414 struct mlx5_ib_dev
*dev
= to_mdev(device
);
6415 struct mlx5_ib_rwq_ind_table
*rwq_ind_tbl
;
6416 int sz
= 1 << init_attr
->log_ind_tbl_size
;
6417 struct mlx5_ib_create_rwq_ind_tbl_resp resp
= {};
6418 size_t min_resp_len
;
6425 if (udata
->inlen
> 0 &&
6426 !ib_is_udata_cleared(udata
, 0,
6428 return ERR_PTR(-EOPNOTSUPP
);
6430 if (init_attr
->log_ind_tbl_size
>
6431 MLX5_CAP_GEN(dev
->mdev
, log_max_rqt_size
)) {
6432 mlx5_ib_dbg(dev
, "log_ind_tbl_size = %d is bigger than supported = %d\n",
6433 init_attr
->log_ind_tbl_size
,
6434 MLX5_CAP_GEN(dev
->mdev
, log_max_rqt_size
));
6435 return ERR_PTR(-EINVAL
);
6438 min_resp_len
= offsetof(typeof(resp
), reserved
) + sizeof(resp
.reserved
);
6439 if (udata
->outlen
&& udata
->outlen
< min_resp_len
)
6440 return ERR_PTR(-EINVAL
);
6442 rwq_ind_tbl
= kzalloc(sizeof(*rwq_ind_tbl
), GFP_KERNEL
);
6444 return ERR_PTR(-ENOMEM
);
6446 inlen
= MLX5_ST_SZ_BYTES(create_rqt_in
) + sizeof(u32
) * sz
;
6447 in
= kvzalloc(inlen
, GFP_KERNEL
);
6453 rqtc
= MLX5_ADDR_OF(create_rqt_in
, in
, rqt_context
);
6455 MLX5_SET(rqtc
, rqtc
, rqt_actual_size
, sz
);
6456 MLX5_SET(rqtc
, rqtc
, rqt_max_size
, sz
);
6458 for (i
= 0; i
< sz
; i
++)
6459 MLX5_SET(rqtc
, rqtc
, rq_num
[i
], init_attr
->ind_tbl
[i
]->wq_num
);
6461 rwq_ind_tbl
->uid
= to_mpd(init_attr
->ind_tbl
[0]->pd
)->uid
;
6462 MLX5_SET(create_rqt_in
, in
, uid
, rwq_ind_tbl
->uid
);
6464 err
= mlx5_core_create_rqt(dev
->mdev
, in
, inlen
, &rwq_ind_tbl
->rqtn
);
6470 rwq_ind_tbl
->ib_rwq_ind_tbl
.ind_tbl_num
= rwq_ind_tbl
->rqtn
;
6471 if (udata
->outlen
) {
6472 resp
.response_length
= offsetof(typeof(resp
), response_length
) +
6473 sizeof(resp
.response_length
);
6474 err
= ib_copy_to_udata(udata
, &resp
, resp
.response_length
);
6479 return &rwq_ind_tbl
->ib_rwq_ind_tbl
;
6482 mlx5_cmd_destroy_rqt(dev
->mdev
, rwq_ind_tbl
->rqtn
, rwq_ind_tbl
->uid
);
6485 return ERR_PTR(err
);
6488 int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table
*ib_rwq_ind_tbl
)
6490 struct mlx5_ib_rwq_ind_table
*rwq_ind_tbl
= to_mrwq_ind_table(ib_rwq_ind_tbl
);
6491 struct mlx5_ib_dev
*dev
= to_mdev(ib_rwq_ind_tbl
->device
);
6493 mlx5_cmd_destroy_rqt(dev
->mdev
, rwq_ind_tbl
->rqtn
, rwq_ind_tbl
->uid
);
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(wq->device);
	struct mlx5_ib_rwq *rwq = to_mrwq(wq);
	struct mlx5_ib_modify_wq ucmd = {};
	size_t required_cmd_sz;
	int curr_wq_state;
	int wq_state;
	int inlen;
	int err;
	void *rqc;
	void *in;

	required_cmd_sz = offsetof(typeof(ucmd), reserved) + sizeof(ucmd.reserved);
	if (udata->inlen < required_cmd_sz)
		return -EINVAL;

	if (udata->inlen > sizeof(ucmd) &&
	    !ib_is_udata_cleared(udata, sizeof(ucmd),
				 udata->inlen - sizeof(ucmd)))
		return -EOPNOTSUPP;

	if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)))
		return -EFAULT;

	if (ucmd.comp_mask || ucmd.reserved)
		return -EOPNOTSUPP;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	curr_wq_state = (wq_attr_mask & IB_WQ_CUR_STATE) ?
		wq_attr->curr_wq_state : wq->state;
	wq_state = (wq_attr_mask & IB_WQ_STATE) ?
		wq_attr->wq_state : curr_wq_state;
	if (curr_wq_state == IB_WQS_ERR)
		curr_wq_state = MLX5_RQC_STATE_ERR;
	if (wq_state == IB_WQS_ERR)
		wq_state = MLX5_RQC_STATE_ERR;
	MLX5_SET(modify_rq_in, in, rq_state, curr_wq_state);
	MLX5_SET(modify_rq_in, in, uid, to_mpd(wq->pd)->uid);
	MLX5_SET(rqc, rqc, state, wq_state);

	if (wq_attr_mask & IB_WQ_FLAGS) {
		if (wq_attr->flags_mask & IB_WQ_FLAGS_CVLAN_STRIPPING) {
			if (!(MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
			      MLX5_CAP_ETH(dev->mdev, vlan_cap))) {
				mlx5_ib_dbg(dev, "VLAN offloads are not supported\n");
				err = -EOPNOTSUPP;
				goto out;
			}
			MLX5_SET64(modify_rq_in, in, modify_bitmask,
				   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
			MLX5_SET(rqc, rqc, vsd,
				 (wq_attr->flags & IB_WQ_FLAGS_CVLAN_STRIPPING) ? 0 : 1);
		}

		if (wq_attr->flags_mask & IB_WQ_FLAGS_PCI_WRITE_END_PADDING) {
			mlx5_ib_dbg(dev, "Modifying scatter end padding is not supported\n");
			err = -EOPNOTSUPP;
			goto out;
		}
	}

	if (curr_wq_state == IB_WQS_RESET && wq_state == IB_WQS_RDY) {
		u16 set_id;

		set_id = mlx5_ib_get_counters_id(dev, 0);
		if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) {
			MLX5_SET64(modify_rq_in, in, modify_bitmask,
				   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
			MLX5_SET(rqc, rqc, counter_set_id, set_id);
		} else
			dev_info_once(
				&dev->ib_dev.dev,
				"Receive WQ counters are not supported on current FW\n");
	}

	err = mlx5_core_modify_rq(dev->mdev, rwq->core_qp.qpn, in);
	if (!err)
		rwq->ibwq.state = (wq_state == MLX5_RQC_STATE_ERR) ? IB_WQS_ERR : wq_state;

out:
	kvfree(in);
	return err;
}
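
/*
 * Illustrative sketch, not part of the driver: the WQ state translation
 * used above, written out as a helper for clarity. IB_WQS_RESET and
 * IB_WQS_RDY line up numerically with the MLX5 RQC states (RST = 0,
 * RDY = 1), so only IB_WQS_ERR needs an explicit mapping to
 * MLX5_RQC_STATE_ERR. The helper name is hypothetical.
 */
static int __maybe_unused example_wq_state_to_rqc_state(enum ib_wq_state state)
{
	return (state == IB_WQS_ERR) ? MLX5_RQC_STATE_ERR : state;
}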
struct mlx5_ib_drain_cqe {
	struct ib_cqe cqe;
	struct completion done;
};

static void mlx5_ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct mlx5_ib_drain_cqe *cqe = container_of(wc->wr_cqe,
						     struct mlx5_ib_drain_cqe,
						     cqe);

	complete(&cqe->done);
}
/* This function returns only once the drained WR was completed */
static void handle_drain_completion(struct ib_cq *cq,
				    struct mlx5_ib_drain_cqe *sdrain,
				    struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;

	if (cq->poll_ctx == IB_POLL_DIRECT) {
		while (wait_for_completion_timeout(&sdrain->done, HZ / 10) <= 0)
			ib_process_cq_direct(cq, -1);
		return;
	}

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		struct mlx5_ib_cq *mcq = to_mcq(cq);
		bool triggered = false;
		unsigned long flags;

		spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
		/* Make sure that the CQ handler won't run if wasn't run yet */
		if (!mcq->mcq.reset_notify_added)
			mcq->mcq.reset_notify_added = 1;
		else
			triggered = true;
		spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);

		if (triggered) {
			/* Wait for any scheduled/running task to be ended */
			switch (cq->poll_ctx) {
			case IB_POLL_SOFTIRQ:
				irq_poll_disable(&cq->iop);
				irq_poll_enable(&cq->iop);
				break;
			case IB_POLL_WORKQUEUE:
				cancel_work_sync(&cq->work);
				break;
			default:
				WARN_ON_ONCE(1);
			}
		}

		/* Run the CQ handler - this makes sure that the drain WR will
		 * be processed if wasn't processed yet.
		 */
		mcq->mcq.comp(&mcq->mcq, NULL);
	}

	wait_for_completion(&sdrain->done);
}
void mlx5_ib_drain_sq(struct ib_qp *qp)
{
	struct ib_cq *cq = qp->send_cq;
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct mlx5_ib_drain_cqe sdrain;
	const struct ib_send_wr *bad_swr;
	struct ib_rdma_wr swr = {
		.wr = {
			.next = NULL,
			{ .wr_cqe = &sdrain.cqe, },
			.opcode = IB_WR_RDMA_WRITE,
		},
	};
	int ret;
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_core_dev *mdev = dev->mdev;

	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
		return;
	}

	sdrain.cqe.done = mlx5_ib_drain_qp_done;
	init_completion(&sdrain.done);

	ret = _mlx5_ib_post_send(qp, &swr.wr, &bad_swr, true);
	if (ret) {
		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
		return;
	}

	handle_drain_completion(cq, &sdrain, dev);
}
void mlx5_ib_drain_rq(struct ib_qp *qp)
{
	struct ib_cq *cq = qp->recv_cq;
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct mlx5_ib_drain_cqe rdrain;
	struct ib_recv_wr rwr = {};
	const struct ib_recv_wr *bad_rwr;
	int ret;
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_core_dev *mdev = dev->mdev;

	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
		return;
	}

	rwr.wr_cqe = &rdrain.cqe;
	rdrain.cqe.done = mlx5_ib_drain_qp_done;
	init_completion(&rdrain.done);

	ret = _mlx5_ib_post_recv(qp, &rwr, &bad_rwr, true);
	if (ret) {
		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
		return;
	}

	handle_drain_completion(cq, &rdrain, dev);
}
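
/*
 * Illustrative sketch, not part of the driver: the intended use of the
 * drain entry points above by an in-kernel ULP during teardown. The core
 * ib_drain_qp() helper calls the device's drain_sq/drain_rq ops
 * (mlx5_ib_drain_sq()/mlx5_ib_drain_rq() here) when they are provided, so
 * once it returns every previously posted WR has generated a completion.
 * The helper name is hypothetical.
 */
static void __maybe_unused example_quiesce_and_destroy_qp(struct ib_qp *qp)
{
	/* Moves the QP to error and waits for the drain WRs to complete */
	ib_drain_qp(qp);

	/* Safe now to release per-WR resources and destroy the QP */
	ib_destroy_qp(qp);
}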
/*
 * Bind a qp to a counter. If @counter is NULL then bind the qp to
 * the default counter
 */
int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_ib_qp *mqp = to_mqp(qp);
	int err = 0;

	mutex_lock(&mqp->mutex);
	if (mqp->state == IB_QPS_RESET) {
		qp->counter = counter;
		goto out;
	}

	if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id)) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (mqp->state == IB_QPS_RTS) {
		err = __mlx5_ib_qp_set_counter(qp, counter);
		if (!err)
			qp->counter = counter;

		goto out;
	}

	/* Defer the counter assignment until the QP reaches RTS */
	mqp->counter_pending = 1;
	qp->counter = counter;

out:
	mutex_unlock(&mqp->mutex);
	return err;
}
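
/*
 * Illustrative sketch, not part of the driver: how a counter bind/unbind
 * device op could forward to mlx5_ib_qp_set_counter(), with unbind
 * expressed as binding the default (NULL) counter. The wiring below is an
 * assumption for illustration only; the real .counter_bind_qp /
 * .counter_unbind_qp hookup lives elsewhere in the driver.
 */
static int __maybe_unused example_counter_bind_qp(struct rdma_counter *counter,
						  struct ib_qp *qp)
{
	return mlx5_ib_qp_set_counter(qp, counter);
}

static int __maybe_unused example_counter_unbind_qp(struct ib_qp *qp)
{
	return mlx5_ib_qp_set_counter(qp, NULL);
}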