/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_user_verbs.h>
#include <linux/mlx5/fs.h>
#include "mlx5_ib.h"
#include "ib_rep.h"
/* not supported currently */
static int wq_signature;

enum {
	MLX5_IB_ACK_REQ_FREQ	= 8,
};

enum {
	MLX5_IB_DEFAULT_SCHED_QUEUE	= 0x83,
	MLX5_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f,
	MLX5_IB_LINK_TYPE_IB		= 0,
	MLX5_IB_LINK_TYPE_ETH		= 1
};

enum {
	MLX5_IB_SQ_STRIDE		= 6,
	MLX5_IB_SQ_UMR_INLINE_THRESHOLD	= 64,
};
static const u32 mlx5_ib_opcode[] = {
	[IB_WR_SEND]				= MLX5_OPCODE_SEND,
	[IB_WR_LSO]				= MLX5_OPCODE_LSO,
	[IB_WR_SEND_WITH_IMM]			= MLX5_OPCODE_SEND_IMM,
	[IB_WR_RDMA_WRITE]			= MLX5_OPCODE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM]		= MLX5_OPCODE_RDMA_WRITE_IMM,
	[IB_WR_RDMA_READ]			= MLX5_OPCODE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP]		= MLX5_OPCODE_ATOMIC_CS,
	[IB_WR_ATOMIC_FETCH_AND_ADD]		= MLX5_OPCODE_ATOMIC_FA,
	[IB_WR_SEND_WITH_INV]			= MLX5_OPCODE_SEND_INVAL,
	[IB_WR_LOCAL_INV]			= MLX5_OPCODE_UMR,
	[IB_WR_REG_MR]				= MLX5_OPCODE_UMR,
	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP]	= MLX5_OPCODE_ATOMIC_MASKED_CS,
	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]	= MLX5_OPCODE_ATOMIC_MASKED_FA,
	[MLX5_IB_WR_UMR]			= MLX5_OPCODE_UMR,
};
struct mlx5_wqe_eth_pad {
	u8 rsvd0[16];
};

enum raw_qp_set_mask_map {
	MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID		= 1UL << 0,
	MLX5_RAW_QP_RATE_LIMIT			= 1UL << 1,
};

struct mlx5_modify_raw_qp_param {
	u16 operation;

	u32 set_mask; /* raw_qp_set_mask_map */

	struct mlx5_rate_limit rl;

	u8 rq_q_ctr_id;
};
static void get_cqs(enum ib_qp_type qp_type,
		    struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
		    struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq);

static int is_qp0(enum ib_qp_type qp_type)
{
	return qp_type == IB_QPT_SMI;
}

static int is_sqp(enum ib_qp_type qp_type)
{
	return is_qp0(qp_type) || is_qp1(qp_type);
}

static void *get_wqe(struct mlx5_ib_qp *qp, int offset)
{
	return mlx5_buf_offset(&qp->buf, offset);
}

static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
}

void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));
}
/**
 * mlx5_ib_read_user_wqe() - Copy a user-space WQE to kernel space.
 *
 * @qp: QP to copy from.
 * @send: copy from the send queue when non-zero, use the receive queue
 *	  otherwise.
 * @wqe_index: index to start copying from. For send work queues, the
 *	       wqe_index is in units of MLX5_SEND_WQE_BB.
 *	       For receive work queue, it is the number of work queue
 *	       element in the queue.
 * @buffer: destination buffer.
 * @length: maximum number of bytes to copy.
 *
 * Copies at least a single WQE, but may copy more data.
 *
 * Return: the number of bytes copied, or an error code.
 */
int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
			  void *buffer, u32 length,
			  struct mlx5_ib_qp_base *base)
{
	struct ib_device *ibdev = qp->ibqp.device;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_wq *wq = send ? &qp->sq : &qp->rq;
	size_t offset;
	size_t wq_end;
	struct ib_umem *umem = base->ubuffer.umem;
	u32 first_copy_length;
	int wqe_length;
	int ret;

	if (wq->wqe_cnt == 0) {
		mlx5_ib_dbg(dev, "mlx5_ib_read_user_wqe for a QP with wqe_cnt == 0. qp_type: 0x%x\n",
			    qp->ibqp.qp_type);
		return -EINVAL;
	}

	offset = wq->offset + ((wqe_index % wq->wqe_cnt) << wq->wqe_shift);
	wq_end = wq->offset + (wq->wqe_cnt << wq->wqe_shift);

	if (send && length < sizeof(struct mlx5_wqe_ctrl_seg))
		return -EINVAL;

	if (offset > umem->length ||
	    (send && offset + sizeof(struct mlx5_wqe_ctrl_seg) > umem->length))
		return -EINVAL;

	first_copy_length = min_t(u32, offset + length, wq_end) - offset;
	ret = ib_umem_copy_from(buffer, umem, offset, first_copy_length);
	if (ret)
		return ret;

	if (send) {
		struct mlx5_wqe_ctrl_seg *ctrl = buffer;
		int ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;

		wqe_length = ds * MLX5_WQE_DS_UNITS;
	} else {
		wqe_length = 1 << wq->wqe_shift;
	}

	if (wqe_length <= first_copy_length)
		return first_copy_length;

	ret = ib_umem_copy_from(buffer + first_copy_length, umem, wq->offset,
				wqe_length - first_copy_length);
	if (ret)
		return ret;

	return wqe_length;
}
static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
{
	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
	struct ib_event event;

	if (type == MLX5_EVENT_TYPE_PATH_MIG) {
		/* This event is only valid for trans_qps */
		to_mibqp(qp)->port = to_mibqp(qp)->trans_qp.alt_port;
	}

	if (ibqp->event_handler) {
		event.device     = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case MLX5_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case MLX5_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case MLX5_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn);
			return;
		}

		ibqp->event_handler(&event, ibqp->qp_context);
	}
}
static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
		       int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
{
	int wqe_size;
	int wq_size;

	/* Sanity check RQ size before proceeding */
	if (cap->max_recv_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)))
		return -EINVAL;

	if (!has_rq) {
		qp->rq.max_gs = 0;
		qp->rq.wqe_cnt = 0;
		qp->rq.wqe_shift = 0;
		cap->max_recv_wr = 0;
		cap->max_recv_sge = 0;
	} else {
		if (ucmd) {
			qp->rq.wqe_cnt = ucmd->rq_wqe_count;
			if (ucmd->rq_wqe_shift > BITS_PER_BYTE * sizeof(ucmd->rq_wqe_shift))
				return -EINVAL;
			qp->rq.wqe_shift = ucmd->rq_wqe_shift;
			if ((1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) < qp->wq_sig)
				return -EINVAL;
			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
			qp->rq.max_post = qp->rq.wqe_cnt;
		} else {
			wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0;
			wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
			wqe_size = roundup_pow_of_two(wqe_size);
			wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
			wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
			qp->rq.wqe_cnt = wq_size / wqe_size;
			if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq)) {
				mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
					    wqe_size,
					    MLX5_CAP_GEN(dev->mdev,
							 max_wqe_sz_rq));
				return -EINVAL;
			}
			qp->rq.wqe_shift = ilog2(wqe_size);
			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
			qp->rq.max_post = qp->rq.wqe_cnt;
		}
	}

	return 0;
}
static int sq_overhead(struct ib_qp_init_attr *attr)
{
	int size = 0;

	switch (attr->qp_type) {
	case IB_QPT_XRC_INI:
		size += sizeof(struct mlx5_wqe_xrc_seg);
		/* fall through */
	case IB_QPT_RC:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			max(sizeof(struct mlx5_wqe_atomic_seg) +
			    sizeof(struct mlx5_wqe_raddr_seg),
			    sizeof(struct mlx5_wqe_umr_ctrl_seg) +
			    sizeof(struct mlx5_mkey_seg) +
			    MLX5_IB_SQ_UMR_INLINE_THRESHOLD /
			    MLX5_IB_UMR_OCTOWORD);
		break;

	case IB_QPT_XRC_TGT:
		return 0;

	case IB_QPT_UC:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			max(sizeof(struct mlx5_wqe_raddr_seg),
			    sizeof(struct mlx5_wqe_umr_ctrl_seg) +
			    sizeof(struct mlx5_mkey_seg));
		break;

	case IB_QPT_UD:
		if (attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
			size += sizeof(struct mlx5_wqe_eth_pad) +
				sizeof(struct mlx5_wqe_eth_seg);
		/* fall through */
	case IB_QPT_SMI:
	case MLX5_IB_QPT_HW_GSI:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_datagram_seg);
		break;

	case MLX5_IB_QPT_REG_UMR:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_umr_ctrl_seg) +
			sizeof(struct mlx5_mkey_seg);
		break;

	default:
		return -EINVAL;
	}

	return size;
}
static int calc_send_wqe(struct ib_qp_init_attr *attr)
{
	int inl_size = 0;
	int size;

	size = sq_overhead(attr);
	if (size < 0)
		return size;

	if (attr->cap.max_inline_data) {
		inl_size = size + sizeof(struct mlx5_wqe_inline_seg) +
			attr->cap.max_inline_data;
	}

	size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);
	if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN &&
	    ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB) < MLX5_SIG_WQE_SIZE)
		return MLX5_SIG_WQE_SIZE;
	else
		return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
}
static int get_send_sge(struct ib_qp_init_attr *attr, int wqe_size)
{
	int max_sge;

	if (attr->qp_type == IB_QPT_RC)
		max_sge = (min_t(int, wqe_size, 512) -
			   sizeof(struct mlx5_wqe_ctrl_seg) -
			   sizeof(struct mlx5_wqe_raddr_seg)) /
			sizeof(struct mlx5_wqe_data_seg);
	else if (attr->qp_type == IB_QPT_XRC_INI)
		max_sge = (min_t(int, wqe_size, 512) -
			   sizeof(struct mlx5_wqe_ctrl_seg) -
			   sizeof(struct mlx5_wqe_xrc_seg) -
			   sizeof(struct mlx5_wqe_raddr_seg)) /
			sizeof(struct mlx5_wqe_data_seg);
	else
		max_sge = (wqe_size - sq_overhead(attr)) /
			sizeof(struct mlx5_wqe_data_seg);

	return min_t(int, max_sge, wqe_size - sq_overhead(attr) /
		     sizeof(struct mlx5_wqe_data_seg));
}
static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
			struct mlx5_ib_qp *qp)
{
	int wqe_size;
	int wq_size;

	if (!attr->cap.max_send_wr)
		return 0;

	wqe_size = calc_send_wqe(attr);
	mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size);
	if (wqe_size < 0)
		return wqe_size;

	if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
		mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
			    wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
		return -EINVAL;
	}

	qp->max_inline_data = wqe_size - sq_overhead(attr) -
			      sizeof(struct mlx5_wqe_inline_seg);
	attr->cap.max_inline_data = qp->max_inline_data;

	if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN)
		qp->signature_en = true;

	wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
	qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
	if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
		mlx5_ib_dbg(dev, "send queue size (%d * %d / %d -> %d) exceeds limits(%d)\n",
			    attr->cap.max_send_wr, wqe_size, MLX5_SEND_WQE_BB,
			    qp->sq.wqe_cnt,
			    1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
		return -ENOMEM;
	}
	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
	qp->sq.max_gs = get_send_sge(attr, wqe_size);
	if (qp->sq.max_gs < attr->cap.max_send_sge)
		return -ENOMEM;

	attr->cap.max_send_sge = qp->sq.max_gs;
	qp->sq.max_post = wq_size / wqe_size;
	attr->cap.max_send_wr = qp->sq.max_post;

	return wq_size;
}
static int set_user_buf_size(struct mlx5_ib_dev *dev,
			     struct mlx5_ib_qp *qp,
			     struct mlx5_ib_create_qp *ucmd,
			     struct mlx5_ib_qp_base *base,
			     struct ib_qp_init_attr *attr)
{
	int desc_sz = 1 << qp->sq.wqe_shift;

	if (desc_sz > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
		mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
			     desc_sz, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
		return -EINVAL;
	}

	if (ucmd->sq_wqe_count && ((1 << ilog2(ucmd->sq_wqe_count)) != ucmd->sq_wqe_count)) {
		mlx5_ib_warn(dev, "sq_wqe_count %d, sq_wqe_count %d\n",
			     ucmd->sq_wqe_count, ucmd->sq_wqe_count);
		return -EINVAL;
	}

	qp->sq.wqe_cnt = ucmd->sq_wqe_count;

	if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
		mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
			     qp->sq.wqe_cnt,
			     1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
		return -EINVAL;
	}

	if (attr->qp_type == IB_QPT_RAW_PACKET ||
	    qp->flags & MLX5_IB_QP_UNDERLAY) {
		base->ubuffer.buf_size = qp->rq.wqe_cnt << qp->rq.wqe_shift;
		qp->raw_packet_qp.sq.ubuffer.buf_size = qp->sq.wqe_cnt << 6;
	} else {
		base->ubuffer.buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
					 (qp->sq.wqe_cnt << 6);
	}

	return 0;
}

static int qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI ||
	    attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
	    attr->qp_type == MLX5_IB_QPT_REG_UMR ||
	    !attr->cap.max_recv_wr)
		return 0;

	return 1;
}
/* this is the first blue flame register in the array of bfregs assigned
 * to a process. Since we do not use it for blue flame but rather
 * regular 64 bit doorbells, we do not need a lock for maintaining
 * "odd/even" order
 */
enum {
	NUM_NON_BLUE_FLAME_BFREGS = 1,
};

static int max_bfregs(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi)
{
	return get_num_static_uars(dev, bfregi) * MLX5_NON_FP_BFREGS_PER_UAR;
}

static int num_med_bfreg(struct mlx5_ib_dev *dev,
			 struct mlx5_bfreg_info *bfregi)
{
	int n;

	n = max_bfregs(dev, bfregi) - bfregi->num_low_latency_bfregs -
	    NUM_NON_BLUE_FLAME_BFREGS;

	return n >= 0 ? n : 0;
}

static int first_med_bfreg(struct mlx5_ib_dev *dev,
			   struct mlx5_bfreg_info *bfregi)
{
	return num_med_bfreg(dev, bfregi) ? 1 : -ENOMEM;
}
static int first_hi_bfreg(struct mlx5_ib_dev *dev,
			  struct mlx5_bfreg_info *bfregi)
{
	int med;

	med = num_med_bfreg(dev, bfregi);
	return ++med;
}

static int alloc_high_class_bfreg(struct mlx5_ib_dev *dev,
				  struct mlx5_bfreg_info *bfregi)
{
	int i;

	for (i = first_hi_bfreg(dev, bfregi); i < max_bfregs(dev, bfregi); i++) {
		if (!bfregi->count[i]) {
			bfregi->count[i]++;
			return i;
		}
	}

	return -ENOMEM;
}

static int alloc_med_class_bfreg(struct mlx5_ib_dev *dev,
				 struct mlx5_bfreg_info *bfregi)
{
	int minidx = first_med_bfreg(dev, bfregi);
	int i;

	if (minidx < 0)
		return minidx;

	for (i = minidx; i < first_hi_bfreg(dev, bfregi); i++) {
		if (bfregi->count[i] < bfregi->count[minidx])
			minidx = i;
		if (!bfregi->count[minidx])
			break;
	}

	bfregi->count[minidx]++;
	return minidx;
}
static int alloc_bfreg(struct mlx5_ib_dev *dev,
		       struct mlx5_bfreg_info *bfregi)
{
	int bfregn = -ENOMEM;

	mutex_lock(&bfregi->lock);
	if (bfregi->ver >= 2) {
		bfregn = alloc_high_class_bfreg(dev, bfregi);
		if (bfregn < 0)
			bfregn = alloc_med_class_bfreg(dev, bfregi);
	}

	if (bfregn < 0) {
		BUILD_BUG_ON(NUM_NON_BLUE_FLAME_BFREGS != 1);
		bfregn = 0;
		bfregi->count[bfregn]++;
	}
	mutex_unlock(&bfregi->lock);

	return bfregn;
}

void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi, int bfregn)
{
	mutex_lock(&bfregi->lock);
	bfregi->count[bfregn]--;
	mutex_unlock(&bfregi->lock);
}
static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:	return MLX5_QP_STATE_RST;
	case IB_QPS_INIT:	return MLX5_QP_STATE_INIT;
	case IB_QPS_RTR:	return MLX5_QP_STATE_RTR;
	case IB_QPS_RTS:	return MLX5_QP_STATE_RTS;
	case IB_QPS_SQD:	return MLX5_QP_STATE_SQD;
	case IB_QPS_SQE:	return MLX5_QP_STATE_SQER;
	case IB_QPS_ERR:	return MLX5_QP_STATE_ERR;
	default:		return -1;
	}
}

static int to_mlx5_st(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_RC:			return MLX5_QP_ST_RC;
	case IB_QPT_UC:			return MLX5_QP_ST_UC;
	case IB_QPT_UD:			return MLX5_QP_ST_UD;
	case MLX5_IB_QPT_REG_UMR:	return MLX5_QP_ST_REG_UMR;
	case IB_QPT_XRC_INI:
	case IB_QPT_XRC_TGT:		return MLX5_QP_ST_XRC;
	case IB_QPT_SMI:		return MLX5_QP_ST_QP0;
	case MLX5_IB_QPT_HW_GSI:	return MLX5_QP_ST_QP1;
	case MLX5_IB_QPT_DCI:		return MLX5_QP_ST_DCI;
	case IB_QPT_RAW_IPV6:		return MLX5_QP_ST_RAW_IPV6;
	case IB_QPT_RAW_PACKET:
	case IB_QPT_RAW_ETHERTYPE:	return MLX5_QP_ST_RAW_ETHERTYPE;
	default:			return -EINVAL;
	}
}
static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq,
			     struct mlx5_ib_cq *recv_cq);
static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq,
			       struct mlx5_ib_cq *recv_cq);

int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
			struct mlx5_bfreg_info *bfregi, u32 bfregn,
			bool dyn_bfreg)
{
	unsigned int bfregs_per_sys_page;
	u32 index_of_sys_page;
	u32 offset;

	bfregs_per_sys_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k) *
				MLX5_NON_FP_BFREGS_PER_UAR;
	index_of_sys_page = bfregn / bfregs_per_sys_page;

	if (dyn_bfreg) {
		index_of_sys_page += bfregi->num_static_sys_pages;

		if (index_of_sys_page >= bfregi->num_sys_pages)
			return -EINVAL;

		if (bfregn > bfregi->num_dyn_bfregs ||
		    bfregi->sys_pages[index_of_sys_page] == MLX5_IB_INVALID_UAR_INDEX) {
			mlx5_ib_dbg(dev, "Invalid dynamic uar index\n");
			return -EINVAL;
		}
	}

	offset = bfregn % bfregs_per_sys_page / MLX5_NON_FP_BFREGS_PER_UAR;
	return bfregi->sys_pages[index_of_sys_page] + offset;
}
static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev,
			    struct ib_pd *pd,
			    unsigned long addr, size_t size,
			    struct ib_umem **umem,
			    int *npages, int *page_shift, int *ncont,
			    u32 *offset)
{
	int err;

	*umem = ib_umem_get(pd->uobject->context, addr, size, 0, 0);
	if (IS_ERR(*umem)) {
		mlx5_ib_dbg(dev, "umem_get failed\n");
		return PTR_ERR(*umem);
	}

	mlx5_ib_cont_pages(*umem, addr, 0, npages, page_shift, ncont, NULL);

	err = mlx5_ib_get_buf_offset(addr, *page_shift, offset);
	if (err) {
		mlx5_ib_warn(dev, "bad offset\n");
		goto err_umem;
	}

	mlx5_ib_dbg(dev, "addr 0x%lx, size %zu, npages %d, page_shift %d, ncont %d, offset %d\n",
		    addr, size, *npages, *page_shift, *ncont, *offset);

	return 0;

err_umem:
	ib_umem_release(*umem);
	*umem = NULL;

	return err;
}
static void destroy_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			    struct mlx5_ib_rwq *rwq)
{
	struct mlx5_ib_ucontext *context;

	if (rwq->create_flags & MLX5_IB_WQ_FLAGS_DELAY_DROP)
		atomic_dec(&dev->delay_drop.rqs_cnt);

	context = to_mucontext(pd->uobject->context);
	mlx5_ib_db_unmap_user(context, &rwq->db);
	if (rwq->umem)
		ib_umem_release(rwq->umem);
}

static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			  struct mlx5_ib_rwq *rwq,
			  struct mlx5_ib_create_wq *ucmd)
{
	struct mlx5_ib_ucontext *context;
	int page_shift = 0;
	int npages;
	u32 offset = 0;
	int ncont = 0;
	int err;

	if (!ucmd->buf_addr)
		return -EINVAL;

	context = to_mucontext(pd->uobject->context);
	rwq->umem = ib_umem_get(pd->uobject->context, ucmd->buf_addr,
				rwq->buf_size, 0, 0);
	if (IS_ERR(rwq->umem)) {
		mlx5_ib_dbg(dev, "umem_get failed\n");
		err = PTR_ERR(rwq->umem);
		return err;
	}

	mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, 0, &npages, &page_shift,
			   &ncont, NULL);
	err = mlx5_ib_get_buf_offset(ucmd->buf_addr, page_shift,
				     &rwq->rq_page_offset);
	if (err) {
		mlx5_ib_warn(dev, "bad offset\n");
		goto err_umem;
	}

	rwq->rq_num_pas = ncont;
	rwq->page_shift = page_shift;
	rwq->log_page_size =  page_shift - MLX5_ADAPTER_PAGE_SHIFT;
	rwq->wq_sig = !!(ucmd->flags & MLX5_WQ_FLAG_SIGNATURE);

	mlx5_ib_dbg(dev, "addr 0x%llx, size %zd, npages %d, page_shift %d, ncont %d, offset %d\n",
		    (unsigned long long)ucmd->buf_addr, rwq->buf_size,
		    npages, page_shift, ncont, offset);

	err = mlx5_ib_db_map_user(context, ucmd->db_addr, &rwq->db);
	if (err) {
		mlx5_ib_dbg(dev, "map failed\n");
		goto err_umem;
	}

	rwq->create_type = MLX5_WQ_USER;
	return 0;

err_umem:
	ib_umem_release(rwq->umem);
	return err;
}
static int adjust_bfregn(struct mlx5_ib_dev *dev,
			 struct mlx5_bfreg_info *bfregi, int bfregn)
{
	return bfregn / MLX5_NON_FP_BFREGS_PER_UAR * MLX5_BFREGS_PER_UAR +
				bfregn % MLX5_NON_FP_BFREGS_PER_UAR;
}
static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			  struct mlx5_ib_qp *qp, struct ib_udata *udata,
			  struct ib_qp_init_attr *attr,
			  u32 **in,
			  struct mlx5_ib_create_qp_resp *resp, int *inlen,
			  struct mlx5_ib_qp_base *base)
{
	struct mlx5_ib_ucontext *context;
	struct mlx5_ib_create_qp ucmd;
	struct mlx5_ib_ubuffer *ubuffer = &base->ubuffer;
	int page_shift = 0;
	int uar_index = 0;
	int npages;
	u32 offset = 0;
	int bfregn;
	int ncont = 0;
	__be64 *pas;
	void *qpc;
	int err;

	err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
	if (err) {
		mlx5_ib_dbg(dev, "copy failed\n");
		return err;
	}

	context = to_mucontext(pd->uobject->context);
	if (ucmd.flags & MLX5_QP_FLAG_BFREG_INDEX) {
		uar_index = bfregn_to_uar_index(dev, &context->bfregi,
						ucmd.bfreg_index, true);
		if (uar_index < 0)
			return uar_index;

		bfregn = MLX5_IB_INVALID_BFREG;
	} else if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL) {
		/*
		 * TBD: should come from the verbs when we have the API
		 */
		/* In CROSS_CHANNEL CQ and QP must use the same UAR */
		bfregn = MLX5_CROSS_CHANNEL_BFREG;
	} else {
		bfregn = alloc_bfreg(dev, &context->bfregi);
		if (bfregn < 0)
			return bfregn;
	}

	mlx5_ib_dbg(dev, "bfregn 0x%x, uar_index 0x%x\n", bfregn, uar_index);
	if (bfregn != MLX5_IB_INVALID_BFREG)
		uar_index = bfregn_to_uar_index(dev, &context->bfregi, bfregn,
						false);

	qp->rq.offset = 0;
	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;

	err = set_user_buf_size(dev, qp, &ucmd, base, attr);
	if (err)
		goto err_bfreg;

	if (ucmd.buf_addr && ubuffer->buf_size) {
		ubuffer->buf_addr = ucmd.buf_addr;
		err = mlx5_ib_umem_get(dev, pd, ubuffer->buf_addr,
				       ubuffer->buf_size,
				       &ubuffer->umem, &npages, &page_shift,
				       &ncont, &offset);
		if (err)
			goto err_bfreg;
	} else {
		ubuffer->umem = NULL;
	}

	*inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
		 MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * ncont;
	*in = kvzalloc(*inlen, GFP_KERNEL);
	if (!*in) {
		err = -ENOMEM;
		goto err_umem;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas);
	if (ubuffer->umem)
		mlx5_ib_populate_pas(dev, ubuffer->umem, page_shift, pas, 0);

	qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);

	MLX5_SET(qpc, qpc, log_page_size, page_shift - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(qpc, qpc, page_offset, offset);

	MLX5_SET(qpc, qpc, uar_page, uar_index);
	if (bfregn != MLX5_IB_INVALID_BFREG)
		resp->bfreg_index = adjust_bfregn(dev, &context->bfregi, bfregn);
	else
		resp->bfreg_index = MLX5_IB_INVALID_BFREG;
	qp->bfregn = bfregn;

	err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db);
	if (err) {
		mlx5_ib_dbg(dev, "map failed\n");
		goto err_free;
	}

	err = ib_copy_to_udata(udata, resp, min(udata->outlen, sizeof(*resp)));
	if (err) {
		mlx5_ib_dbg(dev, "copy failed\n");
		goto err_unmap;
	}
	qp->create_type = MLX5_QP_USER;

	return 0;

err_unmap:
	mlx5_ib_db_unmap_user(context, &qp->db);

err_free:
	kvfree(*in);

err_umem:
	if (ubuffer->umem)
		ib_umem_release(ubuffer->umem);

err_bfreg:
	if (bfregn != MLX5_IB_INVALID_BFREG)
		mlx5_ib_free_bfreg(dev, &context->bfregi, bfregn);
	return err;
}
static void destroy_qp_user(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			    struct mlx5_ib_qp *qp, struct mlx5_ib_qp_base *base)
{
	struct mlx5_ib_ucontext *context;

	context = to_mucontext(pd->uobject->context);
	mlx5_ib_db_unmap_user(context, &qp->db);
	if (base->ubuffer.umem)
		ib_umem_release(base->ubuffer.umem);

	/*
	 * Free only the BFREGs which are handled by the kernel.
	 * BFREGs of UARs allocated dynamically are handled by user.
	 */
	if (qp->bfregn != MLX5_IB_INVALID_BFREG)
		mlx5_ib_free_bfreg(dev, &context->bfregi, qp->bfregn);
}
static int create_kernel_qp(struct mlx5_ib_dev *dev,
			    struct ib_qp_init_attr *init_attr,
			    struct mlx5_ib_qp *qp,
			    u32 **in, int *inlen,
			    struct mlx5_ib_qp_base *base)
{
	int uar_index;
	void *qpc;
	int err;

	if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN |
					IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
					IB_QP_CREATE_IPOIB_UD_LSO |
					IB_QP_CREATE_NETIF_QP |
					mlx5_ib_create_qp_sqpn_qp1()))
		return -EINVAL;

	if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
		qp->bf.bfreg = &dev->fp_bfreg;
	else
		qp->bf.bfreg = &dev->bfreg;

	/* We need to divide by two since each register is comprised of
	 * two buffers of identical size, namely odd and even
	 */
	qp->bf.buf_size = (1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size)) / 2;
	uar_index = qp->bf.bfreg->index;

	err = calc_sq_size(dev, init_attr, qp);
	if (err < 0) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		return err;
	}

	qp->rq.offset = 0;
	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
	base->ubuffer.buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);

	err = mlx5_buf_alloc(dev->mdev, base->ubuffer.buf_size, &qp->buf);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		return err;
	}

	qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
	*inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
		 MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * qp->buf.npages;
	*in = kvzalloc(*inlen, GFP_KERNEL);
	if (!*in) {
		err = -ENOMEM;
		goto err_buf;
	}

	qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);
	MLX5_SET(qpc, qpc, uar_page, uar_index);
	MLX5_SET(qpc, qpc, log_page_size, qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

	/* Set "fast registration enabled" for all kernel QPs */
	MLX5_SET(qpc, qpc, fre, 1);
	MLX5_SET(qpc, qpc, rlky, 1);

	if (init_attr->create_flags & mlx5_ib_create_qp_sqpn_qp1()) {
		MLX5_SET(qpc, qpc, deth_sqpn, 1);
		qp->flags |= MLX5_IB_QP_SQPN_QP1;
	}

	mlx5_fill_page_array(&qp->buf,
			     (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas));

	err = mlx5_db_alloc(dev->mdev, &qp->db);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto err_free;
	}

	qp->sq.wrid = kvmalloc_array(qp->sq.wqe_cnt,
				     sizeof(*qp->sq.wrid), GFP_KERNEL);
	qp->sq.wr_data = kvmalloc_array(qp->sq.wqe_cnt,
					sizeof(*qp->sq.wr_data), GFP_KERNEL);
	qp->rq.wrid = kvmalloc_array(qp->rq.wqe_cnt,
				     sizeof(*qp->rq.wrid), GFP_KERNEL);
	qp->sq.w_list = kvmalloc_array(qp->sq.wqe_cnt,
				       sizeof(*qp->sq.w_list), GFP_KERNEL);
	qp->sq.wqe_head = kvmalloc_array(qp->sq.wqe_cnt,
					 sizeof(*qp->sq.wqe_head), GFP_KERNEL);

	if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
	    !qp->sq.w_list || !qp->sq.wqe_head) {
		err = -ENOMEM;
		goto err_wrid;
	}
	qp->create_type = MLX5_QP_KERNEL;

	return 0;

err_wrid:
	kvfree(qp->sq.wqe_head);
	kvfree(qp->sq.w_list);
	kvfree(qp->sq.wrid);
	kvfree(qp->sq.wr_data);
	kvfree(qp->rq.wrid);
	mlx5_db_free(dev->mdev, &qp->db);

err_free:
	kvfree(*in);

err_buf:
	mlx5_buf_free(dev->mdev, &qp->buf);
	return err;
}
static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
	kvfree(qp->sq.wqe_head);
	kvfree(qp->sq.w_list);
	kvfree(qp->sq.wrid);
	kvfree(qp->sq.wr_data);
	kvfree(qp->rq.wrid);
	mlx5_db_free(dev->mdev, &qp->db);
	mlx5_buf_free(dev->mdev, &qp->buf);
}
static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
{
	if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
	    (attr->qp_type == MLX5_IB_QPT_DCI) ||
	    (attr->qp_type == IB_QPT_XRC_INI))
		return MLX5_SRQ_RQ;
	else if (!qp->has_rq)
		return MLX5_ZERO_LEN_RQ;
	else
		return MLX5_NON_ZERO_RQ;
}

static int is_connected(enum ib_qp_type qp_type)
{
	if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC)
		return 1;

	return 0;
}
static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
				    struct mlx5_ib_qp *qp,
				    struct mlx5_ib_sq *sq, u32 tdn)
{
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

	MLX5_SET(tisc, tisc, transport_domain, tdn);
	if (qp->flags & MLX5_IB_QP_UNDERLAY)
		MLX5_SET(tisc, tisc, underlay_qpn, qp->underlay_qpn);

	return mlx5_core_create_tis(dev->mdev, in, sizeof(in), &sq->tisn);
}

static void destroy_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
				      struct mlx5_ib_sq *sq)
{
	mlx5_core_destroy_tis(dev->mdev, sq->tisn);
}
static void destroy_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
				       struct mlx5_ib_sq *sq)
{
	if (sq->flow_rule)
		mlx5_del_flow_rules(sq->flow_rule);
}
static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
				   struct mlx5_ib_sq *sq, void *qpin,
				   struct ib_pd *pd)
{
	struct mlx5_ib_ubuffer *ubuffer = &sq->ubuffer;
	__be64 *pas;
	void *in;
	void *sqc;
	void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc);
	void *wq;
	int inlen;
	int err;
	int page_shift = 0;
	int npages;
	int ncont = 0;
	u32 offset = 0;

	err = mlx5_ib_umem_get(dev, pd, ubuffer->buf_addr, ubuffer->buf_size,
			       &sq->ubuffer.umem, &npages, &page_shift,
			       &ncont, &offset);
	if (err)
		return err;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) + sizeof(u64) * ncont;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_umem;
	}

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	MLX5_SET(sqc, sqc, flush_in_error_en, 1);
	if (MLX5_CAP_ETH(dev->mdev, multi_pkt_send_wqe))
		MLX5_SET(sqc, sqc, allow_multi_pkt_send_wqe, 1);
	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc, sqc, user_index, MLX5_GET(qpc, qpc, user_index));
	MLX5_SET(sqc, sqc, cqn, MLX5_GET(qpc, qpc, cqn_snd));
	MLX5_SET(sqc, sqc, tis_lst_sz, 1);
	MLX5_SET(sqc, sqc, tis_num_0, sq->tisn);
	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
	    MLX5_CAP_ETH(dev->mdev, swp))
		MLX5_SET(sqc, sqc, allow_swp, 1);

	wq = MLX5_ADDR_OF(sqc, sqc, wq);
	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd));
	MLX5_SET(wq, wq, uar_page, MLX5_GET(qpc, qpc, uar_page));
	MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr));
	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(qpc, qpc, log_sq_size));
	MLX5_SET(wq, wq, log_wq_pg_sz,  page_shift - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(wq, wq, page_offset, offset);

	pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
	mlx5_ib_populate_pas(dev, sq->ubuffer.umem, page_shift, pas, 0);

	err = mlx5_core_create_sq_tracked(dev->mdev, in, inlen, &sq->base.mqp);

	kvfree(in);

	if (err)
		goto err_umem;

	err = create_flow_rule_vport_sq(dev, sq);
	if (err)
		goto err_flow;

	return 0;

err_flow:
	mlx5_core_destroy_sq_tracked(dev->mdev, &sq->base.mqp);

err_umem:
	ib_umem_release(sq->ubuffer.umem);
	sq->ubuffer.umem = NULL;

	return err;
}

static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
				     struct mlx5_ib_sq *sq)
{
	destroy_flow_rule_vport_sq(dev, sq);
	mlx5_core_destroy_sq_tracked(dev->mdev, &sq->base.mqp);
	ib_umem_release(sq->ubuffer.umem);
}
static size_t get_rq_pas_size(void *qpc)
{
	u32 log_page_size = MLX5_GET(qpc, qpc, log_page_size) + 12;
	u32 log_rq_stride = MLX5_GET(qpc, qpc, log_rq_stride);
	u32 log_rq_size   = MLX5_GET(qpc, qpc, log_rq_size);
	u32 page_offset   = MLX5_GET(qpc, qpc, page_offset);
	u32 po_quanta	  = 1 << (log_page_size - 6);
	u32 rq_sz	  = 1 << (log_rq_size + 4 + log_rq_stride);
	u32 page_size	  = 1 << log_page_size;
	u32 rq_sz_po      = rq_sz + (page_offset * po_quanta);
	u32 rq_num_pas    = (rq_sz_po + page_size - 1) / page_size;

	return rq_num_pas * sizeof(u64);
}
static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
				   struct mlx5_ib_rq *rq, void *qpin,
				   size_t qpinlen)
{
	struct mlx5_ib_qp *mqp = rq->base.container_mibqp;
	__be64 *pas;
	__be64 *qp_pas;
	void *in;
	void *rqc;
	void *wq;
	void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc);
	size_t rq_pas_size = get_rq_pas_size(qpc);
	size_t inlen;
	int err;

	if (qpinlen < rq_pas_size + MLX5_BYTE_OFF(create_qp_in, pas))
		return -EINVAL;

	inlen = MLX5_ST_SZ_BYTES(create_rq_in) + rq_pas_size;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	if (!(rq->flags & MLX5_IB_RQ_CVLAN_STRIPPING))
		MLX5_SET(rqc, rqc, vsd, 1);
	MLX5_SET(rqc, rqc, mem_rq_type, MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
	MLX5_SET(rqc, rqc, flush_in_error_en, 1);
	MLX5_SET(rqc, rqc, user_index, MLX5_GET(qpc, qpc, user_index));
	MLX5_SET(rqc, rqc, cqn, MLX5_GET(qpc, qpc, cqn_rcv));

	if (mqp->flags & MLX5_IB_QP_CAP_SCATTER_FCS)
		MLX5_SET(rqc, rqc, scatter_fcs, 1);

	wq = MLX5_ADDR_OF(rqc, rqc, wq);
	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	if (rq->flags & MLX5_IB_RQ_PCI_WRITE_END_PADDING)
		MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
	MLX5_SET(wq, wq, page_offset, MLX5_GET(qpc, qpc, page_offset));
	MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd));
	MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr));
	MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(qpc, qpc, log_rq_stride) + 4);
	MLX5_SET(wq, wq, log_wq_pg_sz, MLX5_GET(qpc, qpc, log_page_size));
	MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(qpc, qpc, log_rq_size));

	pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
	qp_pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, qpin, pas);
	memcpy(pas, qp_pas, rq_pas_size);

	err = mlx5_core_create_rq_tracked(dev->mdev, in, inlen, &rq->base.mqp);

	kvfree(in);

	return err;
}

static void destroy_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
				     struct mlx5_ib_rq *rq)
{
	mlx5_core_destroy_rq_tracked(dev->mdev, &rq->base.mqp);
}
static bool tunnel_offload_supported(struct mlx5_core_dev *dev)
{
	return  (MLX5_CAP_ETH(dev, tunnel_stateless_vxlan) ||
		 MLX5_CAP_ETH(dev, tunnel_stateless_gre) ||
		 MLX5_CAP_ETH(dev, tunnel_stateless_geneve_rx));
}
static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
				    struct mlx5_ib_rq *rq, u32 tdn,
				    u32 *qp_flags_en)
{
	u8 lb_flag = 0;
	u32 *in;
	void *tirc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
	MLX5_SET(tirc, tirc, inline_rqn, rq->base.mqp.qpn);
	MLX5_SET(tirc, tirc, transport_domain, tdn);
	if (*qp_flags_en & MLX5_QP_FLAG_TUNNEL_OFFLOADS)
		MLX5_SET(tirc, tirc, tunneled_offload_en, 1);

	if (*qp_flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC)
		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;

	if (*qp_flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)
		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;

	if (dev->rep) {
		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
		*qp_flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
	}

	MLX5_SET(tirc, tirc, self_lb_block, lb_flag);

	err = mlx5_core_create_tir(dev->mdev, in, inlen, &rq->tirn);

	kvfree(in);

	return err;
}

static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
				      struct mlx5_ib_rq *rq)
{
	mlx5_core_destroy_tir(dev->mdev, rq->tirn);
}
static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
				u32 *in, size_t inlen,
				struct ib_pd *pd)
{
	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
	struct ib_uobject *uobj = pd->uobject;
	struct ib_ucontext *ucontext = uobj->context;
	struct mlx5_ib_ucontext *mucontext = to_mucontext(ucontext);
	int err;
	u32 tdn = mucontext->tdn;

	if (qp->sq.wqe_cnt) {
		err = create_raw_packet_qp_tis(dev, qp, sq, tdn);
		if (err)
			return err;

		err = create_raw_packet_qp_sq(dev, sq, in, pd);
		if (err)
			goto err_destroy_tis;

		sq->base.container_mibqp = qp;
		sq->base.mqp.event = mlx5_ib_qp_event;
	}

	if (qp->rq.wqe_cnt) {
		rq->base.container_mibqp = qp;

		if (qp->flags & MLX5_IB_QP_CVLAN_STRIPPING)
			rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING;
		if (qp->flags & MLX5_IB_QP_PCI_WRITE_END_PADDING)
			rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING;
		err = create_raw_packet_qp_rq(dev, rq, in, inlen);
		if (err)
			goto err_destroy_sq;

		err = create_raw_packet_qp_tir(dev, rq, tdn, &qp->flags_en);
		if (err)
			goto err_destroy_rq;
	}

	qp->trans_qp.base.mqp.qpn = qp->sq.wqe_cnt ? sq->base.mqp.qpn :
						     rq->base.mqp.qpn;

	return 0;

err_destroy_rq:
	destroy_raw_packet_qp_rq(dev, rq);
err_destroy_sq:
	if (!qp->sq.wqe_cnt)
		return err;
	destroy_raw_packet_qp_sq(dev, sq);
err_destroy_tis:
	destroy_raw_packet_qp_tis(dev, sq);

	return err;
}
static void destroy_raw_packet_qp(struct mlx5_ib_dev *dev,
				  struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;

	if (qp->rq.wqe_cnt) {
		destroy_raw_packet_qp_tir(dev, rq);
		destroy_raw_packet_qp_rq(dev, rq);
	}

	if (qp->sq.wqe_cnt) {
		destroy_raw_packet_qp_sq(dev, sq);
		destroy_raw_packet_qp_tis(dev, sq);
	}
}

static void raw_packet_qp_copy_info(struct mlx5_ib_qp *qp,
				    struct mlx5_ib_raw_packet_qp *raw_packet_qp)
{
	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;

	sq->sq = &qp->sq;
	rq->rq = &qp->rq;
	sq->doorbell = &qp->db;
	rq->doorbell = &qp->db;
}
static void destroy_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
	mlx5_core_destroy_tir(dev->mdev, qp->rss_qp.tirn);
}
static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
				 struct ib_pd *pd,
				 struct ib_qp_init_attr *init_attr,
				 struct ib_udata *udata)
{
	struct ib_uobject *uobj = pd->uobject;
	struct ib_ucontext *ucontext = uobj->context;
	struct mlx5_ib_ucontext *mucontext = to_mucontext(ucontext);
	struct mlx5_ib_create_qp_resp resp = {};
	int inlen;
	int err;
	u32 *in;
	void *tirc;
	void *hfso;
	u32 selected_fields = 0;
	u32 outer_l4;
	size_t min_resp_len;
	u32 tdn = mucontext->tdn;
	struct mlx5_ib_create_qp_rss ucmd = {};
	size_t required_cmd_sz;
	u8 lb_flag = 0;

	if (init_attr->qp_type != IB_QPT_RAW_PACKET)
		return -EOPNOTSUPP;

	if (init_attr->create_flags || init_attr->send_cq)
		return -EINVAL;

	min_resp_len = offsetof(typeof(resp), bfreg_index) + sizeof(resp.bfreg_index);
	if (udata->outlen < min_resp_len)
		return -EINVAL;

	required_cmd_sz = offsetof(typeof(ucmd), flags) + sizeof(ucmd.flags);
	if (udata->inlen < required_cmd_sz) {
		mlx5_ib_dbg(dev, "invalid inlen\n");
		return -EINVAL;
	}

	if (udata->inlen > sizeof(ucmd) &&
	    !ib_is_udata_cleared(udata, sizeof(ucmd),
				 udata->inlen - sizeof(ucmd))) {
		mlx5_ib_dbg(dev, "inlen is not supported\n");
		return -EOPNOTSUPP;
	}

	if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
		mlx5_ib_dbg(dev, "copy failed\n");
		return -EFAULT;
	}

	if (ucmd.comp_mask) {
		mlx5_ib_dbg(dev, "invalid comp mask\n");
		return -EOPNOTSUPP;
	}

	if (ucmd.flags & ~(MLX5_QP_FLAG_TUNNEL_OFFLOADS |
			   MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
			   MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)) {
		mlx5_ib_dbg(dev, "invalid flags\n");
		return -EOPNOTSUPP;
	}

	if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS &&
	    !tunnel_offload_supported(dev->mdev)) {
		mlx5_ib_dbg(dev, "tunnel offloads isn't supported\n");
		return -EOPNOTSUPP;
	}

	if (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_INNER &&
	    !(ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)) {
		mlx5_ib_dbg(dev, "Tunnel offloads must be set for inner RSS\n");
		return -EOPNOTSUPP;
	}

	if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC || dev->rep) {
		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
		qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
	}

	if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC) {
		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;
		qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC;
	}

	err = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
	if (err) {
		mlx5_ib_dbg(dev, "copy failed\n");
		return -EINVAL;
	}

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
	MLX5_SET(tirc, tirc, disp_type,
		 MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table,
		 init_attr->rwq_ind_tbl->ind_tbl_num);
	MLX5_SET(tirc, tirc, transport_domain, tdn);

	hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);

	if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)
		MLX5_SET(tirc, tirc, tunneled_offload_en, 1);

	MLX5_SET(tirc, tirc, self_lb_block, lb_flag);

	if (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_INNER)
		hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner);
	else
		hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);

	switch (ucmd.rx_hash_function) {
	case MLX5_RX_HASH_FUNC_TOEPLITZ:
	{
		void *rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
		size_t len = MLX5_FLD_SZ_BYTES(tirc, rx_hash_toeplitz_key);

		if (len != ucmd.rx_key_len) {
			err = -EINVAL;
			goto err;
		}

		MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
		memcpy(rss_key, ucmd.rx_hash_key, len);
		break;
	}
	default:
		err = -EOPNOTSUPP;
		goto err;
	}

	if (!ucmd.rx_hash_fields_mask) {
		/* special case when this TIR serves as steering entry without hashing */
		if (!init_attr->rwq_ind_tbl->log_ind_tbl_size)
			goto create_tir;
		err = -EINVAL;
		goto err;
	}

	if (((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
	     (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4)) &&
	     ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
	     (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))) {
		err = -EINVAL;
		goto err;
	}

	/* If none of IPV4 & IPV6 SRC/DST was set - this bit field is ignored */
	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4))
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
	else if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
		 (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);

	outer_l4 = ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
		    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP)) << 0 |
		   ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
		    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP)) << 1 |
		   (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI) << 2;

	/* Check that only one l4 protocol is set */
	if (outer_l4 & (outer_l4 - 1)) {
		err = -EINVAL;
		goto err;
	}

	/* If none of TCP & UDP SRC/DST was set - this bit field is ignored */
	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP))
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
	else if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
		 (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);

	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6))
		selected_fields |= MLX5_HASH_FIELD_SEL_SRC_IP;

	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4) ||
	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
		selected_fields |= MLX5_HASH_FIELD_SEL_DST_IP;

	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP))
		selected_fields |= MLX5_HASH_FIELD_SEL_L4_SPORT;

	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP) ||
	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
		selected_fields |= MLX5_HASH_FIELD_SEL_L4_DPORT;

	if (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI)
		selected_fields |= MLX5_HASH_FIELD_SEL_IPSEC_SPI;

	MLX5_SET(rx_hash_field_select, hfso, selected_fields, selected_fields);

create_tir:
	err = mlx5_core_create_tir(dev->mdev, in, inlen, &qp->rss_qp.tirn);

	if (err)
		goto err;

	kvfree(in);
	/* qpn is reserved for that QP */
	qp->trans_qp.base.mqp.qpn = 0;
	qp->flags |= MLX5_IB_QP_RSS;
	return 0;

err:
	kvfree(in);
	return err;
}
static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata, struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_resources *devr = &dev->devr;
	int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_create_qp_resp resp = {};
	struct mlx5_ib_cq *send_cq;
	struct mlx5_ib_cq *recv_cq;
	unsigned long flags;
	u32 uidx = MLX5_IB_DEFAULT_UIDX;
	struct mlx5_ib_create_qp ucmd;
	struct mlx5_ib_qp_base *base;
	int mlx5_st;
	void *qpc;
	u32 *in;
	int err;

	mutex_init(&qp->mutex);
	spin_lock_init(&qp->sq.lock);
	spin_lock_init(&qp->rq.lock);

	mlx5_st = to_mlx5_st(init_attr->qp_type);
	if (mlx5_st < 0)
		return -EINVAL;

	if (init_attr->rwq_ind_tbl) {
		if (!udata)
			return -ENOSYS;

		err = create_rss_raw_qp_tir(dev, qp, pd, init_attr, udata);
		return err;
	}

	if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
		if (!MLX5_CAP_GEN(mdev, block_lb_mc)) {
			mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
			return -EINVAL;
		} else {
			qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
		}
	}

	if (init_attr->create_flags &
			(IB_QP_CREATE_CROSS_CHANNEL |
			 IB_QP_CREATE_MANAGED_SEND |
			 IB_QP_CREATE_MANAGED_RECV)) {
		if (!MLX5_CAP_GEN(mdev, cd)) {
			mlx5_ib_dbg(dev, "cross-channel isn't supported\n");
			return -EINVAL;
		}
		if (init_attr->create_flags & IB_QP_CREATE_CROSS_CHANNEL)
			qp->flags |= MLX5_IB_QP_CROSS_CHANNEL;
		if (init_attr->create_flags & IB_QP_CREATE_MANAGED_SEND)
			qp->flags |= MLX5_IB_QP_MANAGED_SEND;
		if (init_attr->create_flags & IB_QP_CREATE_MANAGED_RECV)
			qp->flags |= MLX5_IB_QP_MANAGED_RECV;
	}

	if (init_attr->qp_type == IB_QPT_UD &&
	    (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO))
		if (!MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
			mlx5_ib_dbg(dev, "ipoib UD lso qp isn't supported\n");
			return -EOPNOTSUPP;
		}

	if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS) {
		if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
			mlx5_ib_dbg(dev, "Scatter FCS is supported only for Raw Packet QPs");
			return -EOPNOTSUPP;
		}
		if (!MLX5_CAP_GEN(dev->mdev, eth_net_offloads) ||
		    !MLX5_CAP_ETH(dev->mdev, scatter_fcs)) {
			mlx5_ib_dbg(dev, "Scatter FCS isn't supported\n");
			return -EOPNOTSUPP;
		}
		qp->flags |= MLX5_IB_QP_CAP_SCATTER_FCS;
	}

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;

	if (init_attr->create_flags & IB_QP_CREATE_CVLAN_STRIPPING) {
		if (!(MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
		      MLX5_CAP_ETH(dev->mdev, vlan_cap)) ||
		    (init_attr->qp_type != IB_QPT_RAW_PACKET))
			return -EOPNOTSUPP;
		qp->flags |= MLX5_IB_QP_CVLAN_STRIPPING;
	}

	if (pd && pd->uobject) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
			mlx5_ib_dbg(dev, "copy failed\n");
			return -EFAULT;
		}

		err = get_qp_user_index(to_mucontext(pd->uobject->context),
					&ucmd, udata->inlen, &uidx);
		if (err)
			return err;

		qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
		qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
		if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS) {
			if (init_attr->qp_type != IB_QPT_RAW_PACKET ||
			    !tunnel_offload_supported(mdev)) {
				mlx5_ib_dbg(dev, "Tunnel offload isn't supported\n");
				return -EOPNOTSUPP;
			}
			qp->flags_en |= MLX5_QP_FLAG_TUNNEL_OFFLOADS;
		}

		if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC) {
			if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
				mlx5_ib_dbg(dev, "Self-LB UC isn't supported\n");
				return -EOPNOTSUPP;
			}
			qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
		}

		if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC) {
			if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
				mlx5_ib_dbg(dev, "Self-LB UM isn't supported\n");
				return -EOPNOTSUPP;
			}
			qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC;
		}

		if (init_attr->create_flags & IB_QP_CREATE_SOURCE_QPN) {
			if (init_attr->qp_type != IB_QPT_UD ||
			    (MLX5_CAP_GEN(dev->mdev, port_type) !=
			     MLX5_CAP_PORT_TYPE_IB) ||
			    !mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS)) {
				mlx5_ib_dbg(dev, "Source QP option isn't supported\n");
				return -EOPNOTSUPP;
			}

			qp->flags |= MLX5_IB_QP_UNDERLAY;
			qp->underlay_qpn = init_attr->source_qpn;
		}
	} else {
		qp->wq_sig = !!wq_signature;
	}

	base = (init_attr->qp_type == IB_QPT_RAW_PACKET ||
		qp->flags & MLX5_IB_QP_UNDERLAY) ?
	       &qp->raw_packet_qp.rq.base :
	       &qp->trans_qp.base;

	qp->has_rq = qp_has_rq(init_attr);
	err = set_rq_size(dev, &init_attr->cap, qp->has_rq,
			  qp, (pd && pd->uobject) ? &ucmd : NULL);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		return err;
	}

	if (pd) {
		if (pd->uobject) {
			__u32 max_wqes =
				1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
			mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
			if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
			    ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
				mlx5_ib_dbg(dev, "invalid rq params\n");
				return -EINVAL;
			}
			if (ucmd.sq_wqe_count > max_wqes) {
				mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
					    ucmd.sq_wqe_count, max_wqes);
				return -EINVAL;
			}
			if (init_attr->create_flags &
			    mlx5_ib_create_qp_sqpn_qp1()) {
				mlx5_ib_dbg(dev, "user-space is not allowed to create UD QPs spoofing as QP1\n");
				return -EINVAL;
			}
			err = create_user_qp(dev, pd, qp, udata, init_attr, &in,
					     &resp, &inlen, base);
			if (err)
				mlx5_ib_dbg(dev, "err %d\n", err);
		} else {
			err = create_kernel_qp(dev, init_attr, qp, &in, &inlen,
					       base);
			if (err)
				mlx5_ib_dbg(dev, "err %d\n", err);
		}

		if (err)
			return err;
	} else {
		in = kvzalloc(inlen, GFP_KERNEL);
		if (!in)
			return -ENOMEM;

		qp->create_type = MLX5_QP_EMPTY;
	}

	if (is_sqp(init_attr->qp_type))
		qp->port = init_attr->port_num;

	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);

	MLX5_SET(qpc, qpc, st, mlx5_st);
	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);

	if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
		MLX5_SET(qpc, qpc, pd, to_mpd(pd ? pd : devr->p0)->pdn);
	else
		MLX5_SET(qpc, qpc, latency_sensitive, 1);

	if (qp->wq_sig)
		MLX5_SET(qpc, qpc, wq_signature, 1);

	if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
		MLX5_SET(qpc, qpc, block_lb_mc, 1);

	if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
		MLX5_SET(qpc, qpc, cd_master, 1);
	if (qp->flags & MLX5_IB_QP_MANAGED_SEND)
		MLX5_SET(qpc, qpc, cd_slave_send, 1);
	if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
		MLX5_SET(qpc, qpc, cd_slave_receive, 1);

	if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
		int rcqe_sz;
		int scqe_sz;

		rcqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->recv_cq);
		scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq);

		if (rcqe_sz == 128)
			MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
		else
			MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA32_CQE);

		if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) {
			if (scqe_sz == 128)
				MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA64_CQE);
			else
				MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA32_CQE);
		}
	}

	if (qp->rq.wqe_cnt) {
		MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
		MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
	}

	MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr));

	if (qp->sq.wqe_cnt) {
		MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt));
	} else {
		MLX5_SET(qpc, qpc, no_sq, 1);
		if (init_attr->srq &&
		    init_attr->srq->srq_type == IB_SRQT_TM)
			MLX5_SET(qpc, qpc, offload_type,
				 MLX5_QPC_OFFLOAD_TYPE_RNDV);
	}

	/* Set default resources */
	switch (init_attr->qp_type) {
	case IB_QPT_XRC_TGT:
		MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
		MLX5_SET(qpc, qpc, cqn_snd, to_mcq(devr->c0)->mcq.cqn);
		MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
		MLX5_SET(qpc, qpc, xrcd, to_mxrcd(init_attr->xrcd)->xrcdn);
		break;
	case IB_QPT_XRC_INI:
		MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
		MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn);
		MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
		break;
	default:
		if (init_attr->srq) {
			MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x0)->xrcdn);
			MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(init_attr->srq)->msrq.srqn);
		} else {
			MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn);
			MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s1)->msrq.srqn);
		}
	}

	if (init_attr->send_cq)
		MLX5_SET(qpc, qpc, cqn_snd, to_mcq(init_attr->send_cq)->mcq.cqn);

	if (init_attr->recv_cq)
		MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(init_attr->recv_cq)->mcq.cqn);

	MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);

	/* 0xffffff means we ask to work with cqe version 0 */
	if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
		MLX5_SET(qpc, qpc, user_index, uidx);

	/* we use IB_QP_CREATE_IPOIB_UD_LSO to indicates ipoib qp */
	if (init_attr->qp_type == IB_QPT_UD &&
	    (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)) {
		MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1);
		qp->flags |= MLX5_IB_QP_LSO;
	}

	if (init_attr->create_flags & IB_QP_CREATE_PCI_WRITE_END_PADDING) {
		if (!MLX5_CAP_GEN(dev->mdev, end_pad)) {
			mlx5_ib_dbg(dev, "scatter end padding is not supported\n");
			err = -EOPNOTSUPP;
			goto err;
		} else if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
			MLX5_SET(qpc, qpc, end_padding_mode,
				 MLX5_WQ_END_PAD_MODE_ALIGN);
		} else {
			qp->flags |= MLX5_IB_QP_PCI_WRITE_END_PADDING;
		}
	}

	if (inlen < 0) {
		err = -EINVAL;
		goto err;
	}

	if (init_attr->qp_type == IB_QPT_RAW_PACKET ||
	    qp->flags & MLX5_IB_QP_UNDERLAY) {
		qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr;
		raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
		err = create_raw_packet_qp(dev, qp, in, inlen, pd);
	} else {
		err = mlx5_core_create_qp(dev->mdev, &base->mqp, in, inlen);
	}

	if (err) {
		mlx5_ib_dbg(dev, "create qp failed\n");
		goto err_create;
	}

	kvfree(in);

	base->container_mibqp = qp;
	base->mqp.event = mlx5_ib_qp_event;

	get_cqs(init_attr->qp_type, init_attr->send_cq, init_attr->recv_cq,
		&send_cq, &recv_cq);
	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
	mlx5_ib_lock_cqs(send_cq, recv_cq);
	/* Maintain device to QPs access, needed for further handling via reset
	 * flow
	 */
	list_add_tail(&qp->qps_list, &dev->qp_list);
	/* Maintain CQ to QPs access, needed for further handling via reset flow
	 */
	if (send_cq)
		list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp);
	if (recv_cq)
		list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
	mlx5_ib_unlock_cqs(send_cq, recv_cq);
	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);

	return 0;

err_create:
	if (qp->create_type == MLX5_QP_USER)
		destroy_qp_user(dev, pd, qp, base);
	else if (qp->create_type == MLX5_QP_KERNEL)
		destroy_qp_kernel(dev, qp);

err:
	kvfree(in);
	return err;
}
static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq) {
		if (recv_cq) {
			if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
				spin_lock(&send_cq->lock);
				spin_lock_nested(&recv_cq->lock,
						 SINGLE_DEPTH_NESTING);
			} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
				spin_lock(&send_cq->lock);
				__acquire(&recv_cq->lock);
			} else {
				spin_lock(&recv_cq->lock);
				spin_lock_nested(&send_cq->lock,
						 SINGLE_DEPTH_NESTING);
			}
		} else {
			spin_lock(&send_cq->lock);
			__acquire(&recv_cq->lock);
		}
	} else if (recv_cq) {
		spin_lock(&recv_cq->lock);
		__acquire(&send_cq->lock);
	} else {
		__acquire(&send_cq->lock);
		__acquire(&recv_cq->lock);
	}
}
static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
	__releases(&send_cq->lock) __releases(&recv_cq->lock)
{
	if (send_cq) {
		if (recv_cq) {
			if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
				spin_unlock(&recv_cq->lock);
				spin_unlock(&send_cq->lock);
			} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
				__release(&recv_cq->lock);
				spin_unlock(&send_cq->lock);
			} else {
				spin_unlock(&send_cq->lock);
				spin_unlock(&recv_cq->lock);
			}
		} else {
			__release(&recv_cq->lock);
			spin_unlock(&send_cq->lock);
		}
	} else if (recv_cq) {
		__release(&send_cq->lock);
		spin_unlock(&recv_cq->lock);
	} else {
		__release(&recv_cq->lock);
		__release(&send_cq->lock);
	}
}
static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp)
{
	return to_mpd(qp->ibqp.pd);
}

static void get_cqs(enum ib_qp_type qp_type,
		    struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
		    struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
{
	switch (qp_type) {
	case IB_QPT_XRC_TGT:
		*send_cq = NULL;
		*recv_cq = NULL;
		break;
	case MLX5_IB_QPT_REG_UMR:
	case IB_QPT_XRC_INI:
		*send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
		*recv_cq = NULL;
		break;

	case IB_QPT_SMI:
	case MLX5_IB_QPT_HW_GSI:
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	case IB_QPT_RAW_IPV6:
	case IB_QPT_RAW_ETHERTYPE:
	case IB_QPT_RAW_PACKET:
		*send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
		*recv_cq = ib_recv_cq ? to_mcq(ib_recv_cq) : NULL;
		break;

	default:
		*send_cq = NULL;
		*recv_cq = NULL;
		break;
	}
}
static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
				const struct mlx5_modify_raw_qp_param *raw_qp_param,
				u8 lag_tx_affinity);

static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_cq *send_cq, *recv_cq;
	struct mlx5_ib_qp_base *base;
	unsigned long flags;
	int err;

	if (qp->ibqp.rwq_ind_tbl) {
		destroy_rss_raw_qp_tir(dev, qp);
		return;
	}

	base = (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
		qp->flags & MLX5_IB_QP_UNDERLAY) ?
	       &qp->raw_packet_qp.rq.base :
	       &qp->trans_qp.base;

	if (qp->state != IB_QPS_RESET) {
		if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET &&
		    !(qp->flags & MLX5_IB_QP_UNDERLAY)) {
			err = mlx5_core_qp_modify(dev->mdev,
						  MLX5_CMD_OP_2RST_QP, 0,
						  NULL, &base->mqp);
		} else {
			struct mlx5_modify_raw_qp_param raw_qp_param = {
				.operation = MLX5_CMD_OP_2RST_QP
			};

			err = modify_raw_packet_qp(dev, qp, &raw_qp_param, 0);
		}
		if (err)
			mlx5_ib_warn(dev, "mlx5_ib: modify QP 0x%06x to RESET failed\n",
				     base->mqp.qpn);
	}

	get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
		&send_cq, &recv_cq);

	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
	mlx5_ib_lock_cqs(send_cq, recv_cq);
	/* del from lists under both locks above to protect reset flow paths */
	list_del(&qp->qps_list);
	if (send_cq)
		list_del(&qp->cq_send_list);

	if (recv_cq)
		list_del(&qp->cq_recv_list);

	if (qp->create_type == MLX5_QP_KERNEL) {
		__mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
				   qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (send_cq != recv_cq)
			__mlx5_ib_cq_clean(send_cq, base->mqp.qpn,
					   NULL);
	}
	mlx5_ib_unlock_cqs(send_cq, recv_cq);
	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);

	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
	    qp->flags & MLX5_IB_QP_UNDERLAY) {
		destroy_raw_packet_qp(dev, qp);
	} else {
		err = mlx5_core_destroy_qp(dev->mdev, &base->mqp);
		if (err)
			mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n",
				     base->mqp.qpn);
	}

	if (qp->create_type == MLX5_QP_KERNEL)
		destroy_qp_kernel(dev, qp);
	else if (qp->create_type == MLX5_QP_USER)
		destroy_qp_user(dev, &get_pd(qp)->ibpd, qp, base);
}
static const char *ib_qp_type_str(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_SMI:
		return "IB_QPT_SMI";
	case IB_QPT_GSI:
		return "IB_QPT_GSI";
	case IB_QPT_RC:
		return "IB_QPT_RC";
	case IB_QPT_UC:
		return "IB_QPT_UC";
	case IB_QPT_UD:
		return "IB_QPT_UD";
	case IB_QPT_RAW_IPV6:
		return "IB_QPT_RAW_IPV6";
	case IB_QPT_RAW_ETHERTYPE:
		return "IB_QPT_RAW_ETHERTYPE";
	case IB_QPT_XRC_INI:
		return "IB_QPT_XRC_INI";
	case IB_QPT_XRC_TGT:
		return "IB_QPT_XRC_TGT";
	case IB_QPT_RAW_PACKET:
		return "IB_QPT_RAW_PACKET";
	case MLX5_IB_QPT_REG_UMR:
		return "MLX5_IB_QPT_REG_UMR";
	case IB_QPT_DRIVER:
		return "IB_QPT_DRIVER";
	default:
		return "Invalid QP type";
	}
}
static struct ib_qp *mlx5_ib_create_dct(struct ib_pd *pd,
					struct ib_qp_init_attr *attr,
					struct mlx5_ib_create_qp *ucmd)
{
	struct mlx5_ib_qp *qp;
	int err = 0;
	u32 uidx = MLX5_IB_DEFAULT_UIDX;
	void *dctc;

	if (!attr->srq || !attr->recv_cq)
		return ERR_PTR(-EINVAL);

	err = get_qp_user_index(to_mucontext(pd->uobject->context),
				ucmd, sizeof(*ucmd), &uidx);
	if (err)
		return ERR_PTR(err);

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->dct.in = kzalloc(MLX5_ST_SZ_BYTES(create_dct_in), GFP_KERNEL);
	if (!qp->dct.in) {
		err = -ENOMEM;
		goto err_free;
	}

	dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
	qp->qp_sub_type = MLX5_IB_QPT_DCT;
	MLX5_SET(dctc, dctc, pd, to_mpd(pd)->pdn);
	MLX5_SET(dctc, dctc, srqn_xrqn, to_msrq(attr->srq)->msrq.srqn);
	MLX5_SET(dctc, dctc, cqn, to_mcq(attr->recv_cq)->mcq.cqn);
	MLX5_SET64(dctc, dctc, dc_access_key, ucmd->access_key);
	MLX5_SET(dctc, dctc, user_index, uidx);

	qp->state = IB_QPS_RESET;

	return &qp->ibqp;
err_free:
	kfree(qp);
	return ERR_PTR(err);
}
static int set_mlx_qp_type(struct mlx5_ib_dev *dev,
			   struct ib_qp_init_attr *init_attr,
			   struct mlx5_ib_create_qp *ucmd,
			   struct ib_udata *udata)
{
	enum { MLX_QP_FLAGS = MLX5_QP_FLAG_TYPE_DCT | MLX5_QP_FLAG_TYPE_DCI };
	int err;

	if (!udata)
		return -EINVAL;

	if (udata->inlen < sizeof(*ucmd)) {
		mlx5_ib_dbg(dev, "create_qp user command is smaller than expected\n");
		return -EINVAL;
	}
	err = ib_copy_from_udata(ucmd, udata, sizeof(*ucmd));
	if (err)
		return err;

	if ((ucmd->flags & MLX_QP_FLAGS) == MLX5_QP_FLAG_TYPE_DCI) {
		init_attr->qp_type = MLX5_IB_QPT_DCI;
	} else {
		if ((ucmd->flags & MLX_QP_FLAGS) == MLX5_QP_FLAG_TYPE_DCT) {
			init_attr->qp_type = MLX5_IB_QPT_DCT;
		} else {
			mlx5_ib_dbg(dev, "Invalid QP flags\n");
			return -EINVAL;
		}
	}

	if (!MLX5_CAP_GEN(dev->mdev, dct)) {
		mlx5_ib_dbg(dev, "DC transport is not supported\n");
		return -EOPNOTSUPP;
	}

	return 0;
}
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *verbs_init_attr,
				struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev;
	struct mlx5_ib_qp *qp;
	u16 xrcdn = 0;
	int err;
	struct ib_qp_init_attr mlx_init_attr;
	struct ib_qp_init_attr *init_attr = verbs_init_attr;

	if (pd) {
		dev = to_mdev(pd->device);

		if (init_attr->qp_type == IB_QPT_RAW_PACKET) {
			if (!pd->uobject) {
				mlx5_ib_dbg(dev, "Raw Packet QP is not supported for kernel consumers\n");
				return ERR_PTR(-EINVAL);
			} else if (!to_mucontext(pd->uobject->context)->cqe_version) {
				mlx5_ib_dbg(dev, "Raw Packet QP is only supported for CQE version > 0\n");
				return ERR_PTR(-EINVAL);
			}
		}
	} else {
		/* being cautious here */
		if (init_attr->qp_type != IB_QPT_XRC_TGT &&
		    init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
			pr_warn("%s: no PD for transport %s\n", __func__,
				ib_qp_type_str(init_attr->qp_type));
			return ERR_PTR(-EINVAL);
		}
		dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
	}

	if (init_attr->qp_type == IB_QPT_DRIVER) {
		struct mlx5_ib_create_qp ucmd;

		init_attr = &mlx_init_attr;
		memcpy(init_attr, verbs_init_attr, sizeof(*verbs_init_attr));
		err = set_mlx_qp_type(dev, init_attr, &ucmd, udata);
		if (err)
			return ERR_PTR(err);

		if (init_attr->qp_type == MLX5_IB_QPT_DCI) {
			if (init_attr->cap.max_recv_wr ||
			    init_attr->cap.max_recv_sge) {
				mlx5_ib_dbg(dev, "DCI QP requires zero size receive queue\n");
				return ERR_PTR(-EINVAL);
			}
		} else {
			return mlx5_ib_create_dct(pd, init_attr, &ucmd);
		}
	}

	switch (init_attr->qp_type) {
	case IB_QPT_XRC_TGT:
	case IB_QPT_XRC_INI:
		if (!MLX5_CAP_GEN(dev->mdev, xrc)) {
			mlx5_ib_dbg(dev, "XRC not supported\n");
			return ERR_PTR(-ENOSYS);
		}
		init_attr->recv_cq = NULL;
		if (init_attr->qp_type == IB_QPT_XRC_TGT) {
			xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
			init_attr->send_cq = NULL;
		}

		/* fall through */
	case IB_QPT_RAW_PACKET:
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	case IB_QPT_SMI:
	case MLX5_IB_QPT_HW_GSI:
	case MLX5_IB_QPT_REG_UMR:
	case MLX5_IB_QPT_DCI:
		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);

		err = create_qp_common(dev, pd, init_attr, udata, qp);
		if (err) {
			mlx5_ib_dbg(dev, "create_qp_common failed\n");
			kfree(qp);
			return ERR_PTR(err);
		}

		if (is_qp0(init_attr->qp_type))
			qp->ibqp.qp_num = 0;
		else if (is_qp1(init_attr->qp_type))
			qp->ibqp.qp_num = 1;
		else
			qp->ibqp.qp_num = qp->trans_qp.base.mqp.qpn;

		mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
			    qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn,
			    init_attr->recv_cq ? to_mcq(init_attr->recv_cq)->mcq.cqn : -1,
			    init_attr->send_cq ? to_mcq(init_attr->send_cq)->mcq.cqn : -1);

		qp->trans_qp.xrcdn = xrcdn;

		break;

	case IB_QPT_GSI:
		return mlx5_ib_gsi_create_qp(pd, init_attr);

	case IB_QPT_RAW_IPV6:
	case IB_QPT_RAW_ETHERTYPE:
	default:
		mlx5_ib_dbg(dev, "unsupported qp type %d\n",
			    init_attr->qp_type);
		/* Don't support raw QPs */
		return ERR_PTR(-EINVAL);
	}

	if (verbs_init_attr->qp_type == IB_QPT_DRIVER)
		qp->qp_sub_type = init_attr->qp_type;

	return &qp->ibqp;
}
static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
{
	struct mlx5_ib_dev *dev = to_mdev(mqp->ibqp.device);

	if (mqp->state == IB_QPS_RTR) {
		int err;

		err = mlx5_core_destroy_dct(dev->mdev, &mqp->dct.mdct);
		if (err) {
			mlx5_ib_warn(dev, "failed to destroy DCT %d\n", err);
			return err;
		}
	}

	kfree(mqp->dct.in);
	kfree(mqp);
	return 0;
}

int mlx5_ib_destroy_qp(struct ib_qp *qp)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_ib_qp *mqp = to_mqp(qp);

	if (unlikely(qp->qp_type == IB_QPT_GSI))
		return mlx5_ib_gsi_destroy_qp(qp);

	if (mqp->qp_sub_type == MLX5_IB_QPT_DCT)
		return mlx5_ib_destroy_dct(mqp);

	destroy_qp_common(dev, mqp);

	kfree(mqp);

	return 0;
}
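/*
 * Translate the verbs remote access flags into the RRE/RAE/RWE bits of
 * the hardware QP context.  Attributes are taken from attr only for the
 * bits present in attr_mask, otherwise the values cached on the QP are
 * used; a responder depth of zero masks out everything but remote write.
 */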
static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr,
				   int attr_mask)
{
	u32 hw_access_flags = 0;
	u8 dest_rd_atomic;
	u32 access_flags;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->trans_qp.resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->trans_qp.atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MLX5_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= (MLX5_QP_BIT_RAE | MLX5_ATOMIC_MODE_CX);
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MLX5_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}

enum {
	MLX5_PATH_FLAG_FL	= 1 << 0,
	MLX5_PATH_FLAG_FREE_AR	= 1 << 1,
	MLX5_PATH_FLAG_COUNTER	= 1 << 2,
};

static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
{
	if (rate == IB_RATE_PORT_CURRENT)
		return 0;

	if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS)
		return -EINVAL;

	while (rate != IB_RATE_PORT_CURRENT &&
	       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
		 MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
		--rate;

	return rate ? rate + MLX5_STAT_RATE_OFFSET : rate;
}
static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
				      struct mlx5_ib_sq *sq, u8 sl)
{
	void *in;
	void *tisc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_tis_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_tis_in, in, bitmask.prio, 1);

	tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
	MLX5_SET(tisc, tisc, prio, ((sl & 0x7) << 1));

	err = mlx5_core_modify_tis(dev, sq->tisn, in, inlen);

	kvfree(in);

	return err;
}

static int modify_raw_packet_tx_affinity(struct mlx5_core_dev *dev,
					 struct mlx5_ib_sq *sq, u8 tx_affinity)
{
	void *in;
	void *tisc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_tis_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_tis_in, in, bitmask.lag_tx_port_affinity, 1);

	tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
	MLX5_SET(tisc, tisc, lag_tx_port_affinity, tx_affinity);

	err = mlx5_core_modify_tis(dev, sq->tisn, in, inlen);

	kvfree(in);

	return err;
}
static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
			 const struct rdma_ah_attr *ah,
			 struct mlx5_qp_path *path, u8 port, int attr_mask,
			 u32 path_flags, const struct ib_qp_attr *attr,
			 bool alt)
{
	const struct ib_global_route *grh = rdma_ah_read_grh(ah);
	int err;
	enum ib_gid_type gid_type;
	u8 ah_flags = rdma_ah_get_ah_flags(ah);
	u8 sl = rdma_ah_get_sl(ah);

	if (attr_mask & IB_QP_PKEY_INDEX)
		path->pkey_index = cpu_to_be16(alt ? attr->alt_pkey_index :
						     attr->pkey_index);

	if (ah_flags & IB_AH_GRH) {
		if (grh->sgid_index >=
		    dev->mdev->port_caps[port - 1].gid_table_len) {
			pr_err("sgid_index (%u) too large. max is %d\n",
			       grh->sgid_index,
			       dev->mdev->port_caps[port - 1].gid_table_len);
			return -EINVAL;
		}
	}

	if (ah->type == RDMA_AH_ATTR_TYPE_ROCE) {
		if (!(ah_flags & IB_AH_GRH))
			return -EINVAL;

		memcpy(path->rmac, ah->roce.dmac, sizeof(ah->roce.dmac));
		if (qp->ibqp.qp_type == IB_QPT_RC ||
		    qp->ibqp.qp_type == IB_QPT_UC ||
		    qp->ibqp.qp_type == IB_QPT_XRC_INI ||
		    qp->ibqp.qp_type == IB_QPT_XRC_TGT)
			path->udp_sport =
				mlx5_get_roce_udp_sport(dev, ah->grh.sgid_attr);
		path->dci_cfi_prio_sl = (sl & 0x7) << 4;
		gid_type = ah->grh.sgid_attr->gid_type;
		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
			path->ecn_dscp = (grh->traffic_class >> 2) & 0x3f;
	} else {
		path->fl_free_ar = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
		path->fl_free_ar |=
			(path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x40 : 0;
		path->rlid = cpu_to_be16(rdma_ah_get_dlid(ah));
		path->grh_mlid = rdma_ah_get_path_bits(ah) & 0x7f;
		if (ah_flags & IB_AH_GRH)
			path->grh_mlid |= 1 << 7;
		path->dci_cfi_prio_sl = sl & 0xf;
	}

	if (ah_flags & IB_AH_GRH) {
		path->mgid_index = grh->sgid_index;
		path->hop_limit = grh->hop_limit;
		path->tclass_flowlabel =
			cpu_to_be32((grh->traffic_class << 20) |
				    (grh->flow_label));
		memcpy(path->rgid, grh->dgid.raw, 16);
	}

	err = ib_rate_to_mlx5(dev, rdma_ah_get_static_rate(ah));
	if (err < 0)
		return err;
	path->static_rate = err;
	path->port = port;

	if (attr_mask & IB_QP_TIMEOUT)
		path->ackto_lt = (alt ? attr->alt_timeout : attr->timeout) << 3;

	if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt)
		return modify_raw_packet_eth_prio(dev->mdev,
						  &qp->raw_packet_qp.sq,
						  sl & 0xf);

	return 0;
}
static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = {
	[MLX5_QP_STATE_INIT] = {
		[MLX5_QP_STATE_INIT] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE |
					  MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_PKEY_INDEX |
					  MLX5_QP_OPTPAR_PRI_PORT,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_PKEY_INDEX |
					  MLX5_QP_OPTPAR_PRI_PORT,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX | MLX5_QP_OPTPAR_Q_KEY |
					  MLX5_QP_OPTPAR_PRI_PORT,
		},
		[MLX5_QP_STATE_RTR] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH | MLX5_QP_OPTPAR_RRE |
					  MLX5_QP_OPTPAR_RAE | MLX5_QP_OPTPAR_RWE |
					  MLX5_QP_OPTPAR_PKEY_INDEX,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH | MLX5_QP_OPTPAR_RWE |
					  MLX5_QP_OPTPAR_PKEY_INDEX,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX | MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX | MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH | MLX5_QP_OPTPAR_RRE |
					   MLX5_QP_OPTPAR_RAE | MLX5_QP_OPTPAR_RWE |
					   MLX5_QP_OPTPAR_PKEY_INDEX,
		},
	},
	[MLX5_QP_STATE_RTR] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH | MLX5_QP_OPTPAR_RRE |
					  MLX5_QP_OPTPAR_RAE | MLX5_QP_OPTPAR_RWE |
					  MLX5_QP_OPTPAR_PM_STATE | MLX5_QP_OPTPAR_RNR_TIMEOUT,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH | MLX5_QP_OPTPAR_RWE |
					  MLX5_QP_OPTPAR_PM_STATE,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
		},
	},
	[MLX5_QP_STATE_RTS] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE |
					  MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RNR_TIMEOUT |
					  MLX5_QP_OPTPAR_PM_STATE | MLX5_QP_OPTPAR_ALT_ADDR_PATH,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_PM_STATE |
					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY | MLX5_QP_OPTPAR_SRQN |
					  MLX5_QP_OPTPAR_CQN_RCV,
		},
	},
	[MLX5_QP_STATE_SQER] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_UD]	 = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_UC]	 = MLX5_QP_OPTPAR_RWE,
			[MLX5_QP_ST_RC]	 = MLX5_QP_OPTPAR_RNR_TIMEOUT | MLX5_QP_OPTPAR_RWE |
					   MLX5_QP_OPTPAR_RAE | MLX5_QP_OPTPAR_RRE,
		},
	},
};
static int ib_nr_to_mlx5_nr(int ib_mask)
{
	switch (ib_mask) {
	case IB_QP_STATE:
		return 0;
	case IB_QP_CUR_STATE:
		return 0;
	case IB_QP_EN_SQD_ASYNC_NOTIFY:
		return 0;
	case IB_QP_ACCESS_FLAGS:
		return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE |
			MLX5_QP_OPTPAR_RAE;
	case IB_QP_PKEY_INDEX:
		return MLX5_QP_OPTPAR_PKEY_INDEX;
	case IB_QP_PORT:
		return MLX5_QP_OPTPAR_PRI_PORT;
	case IB_QP_QKEY:
		return MLX5_QP_OPTPAR_Q_KEY;
	case IB_QP_AV:
		return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH |
			MLX5_QP_OPTPAR_PRI_PORT;
	case IB_QP_PATH_MTU:
		return 0;
	case IB_QP_TIMEOUT:
		return MLX5_QP_OPTPAR_ACK_TIMEOUT;
	case IB_QP_RETRY_CNT:
		return MLX5_QP_OPTPAR_RETRY_COUNT;
	case IB_QP_RNR_RETRY:
		return MLX5_QP_OPTPAR_RNR_RETRY;
	case IB_QP_RQ_PSN:
		return 0;
	case IB_QP_MAX_QP_RD_ATOMIC:
		return MLX5_QP_OPTPAR_SRA_MAX;
	case IB_QP_ALT_PATH:
		return MLX5_QP_OPTPAR_ALT_ADDR_PATH;
	case IB_QP_MIN_RNR_TIMER:
		return MLX5_QP_OPTPAR_RNR_TIMEOUT;
	case IB_QP_SQ_PSN:
		return 0;
	case IB_QP_MAX_DEST_RD_ATOMIC:
		return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE |
			MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE;
	case IB_QP_PATH_MIG_STATE:
		return MLX5_QP_OPTPAR_PM_STATE;
	case IB_QP_CAP:
		return 0;
	case IB_QP_DEST_QPN:
		return 0;
	}
	return 0;
}

static int ib_mask_to_mlx5_opt(int ib_mask)
{
	int result = 0;
	int i;

	for (i = 0; i < 8 * sizeof(int); i++) {
		if ((1 << i) & ib_mask)
			result |= ib_nr_to_mlx5_nr(1 << i);
	}

	return result;
}
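/*
 * RAW_PACKET and underlay QPs are implemented as a TIS/SQ plus TIR/RQ
 * pair rather than a regular firmware QP object, so state transitions
 * are applied to the RQ and SQ objects individually by the helpers
 * below and dispatched per transition in modify_raw_packet_qp().
 */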
static int modify_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
				   struct mlx5_ib_rq *rq, int new_state,
				   const struct mlx5_modify_raw_qp_param *raw_qp_param)
{
	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_rq_in, in, rq_state, rq->state);

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
	MLX5_SET(rqc, rqc, state, new_state);

	if (raw_qp_param->set_mask & MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID) {
		if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) {
			MLX5_SET64(modify_rq_in, in, modify_bitmask,
				   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
			MLX5_SET(rqc, rqc, counter_set_id, raw_qp_param->rq_q_ctr_id);
		} else
			pr_info_once("%s: RAW PACKET QP counters are not supported on current FW\n",
				     dev->ib_dev.name);
	}

	err = mlx5_core_modify_rq(dev->mdev, rq->base.mqp.qpn, in, inlen);
	if (err)
		goto out;

	rq->state = new_state;

out:
	kvfree(in);
	return err;
}
static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev,
				   struct mlx5_ib_sq *sq,
				   int new_state,
				   const struct mlx5_modify_raw_qp_param *raw_qp_param)
{
	struct mlx5_ib_qp *ibqp = sq->base.container_mibqp;
	struct mlx5_rate_limit old_rl = ibqp->rl;
	struct mlx5_rate_limit new_rl = old_rl;
	bool new_rate_added = false;
	u16 rl_index = 0;
	void *in;
	void *sqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_sq_in, in, sq_state, sq->state);

	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
	MLX5_SET(sqc, sqc, state, new_state);

	if (raw_qp_param->set_mask & MLX5_RAW_QP_RATE_LIMIT) {
		if (new_state != MLX5_SQC_STATE_RDY)
			pr_warn("%s: Rate limit can only be changed when SQ is moving to RDY\n",
				__func__);
		else
			new_rl = raw_qp_param->rl;
	}

	if (!mlx5_rl_are_equal(&old_rl, &new_rl)) {
		if (new_rl.rate) {
			err = mlx5_rl_add_rate(dev, &rl_index, &new_rl);
			if (err) {
				pr_err("Failed configuring rate limit(err %d): \
				       rate %u, max_burst_sz %u, typical_pkt_sz %u\n",
				       err, new_rl.rate, new_rl.max_burst_sz,
				       new_rl.typical_pkt_sz);
				goto out;
			}
			new_rate_added = true;
		}

		MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
		/* index 0 means no limit */
		MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);
	}

	err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in, inlen);
	if (err) {
		/* Remove new rate from table if failed */
		if (new_rate_added)
			mlx5_rl_remove_rate(dev, &new_rl);
		goto out;
	}

	/* Only remove the old rate after new rate was set */
	if ((old_rl.rate &&
	     !mlx5_rl_are_equal(&old_rl, &new_rl)) ||
	    (new_state != MLX5_SQC_STATE_RDY))
		mlx5_rl_remove_rate(dev, &old_rl);

	ibqp->rl = new_rl;
	sq->state = new_state;

out:
	kvfree(in);
	return err;
}
static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
				const struct mlx5_modify_raw_qp_param *raw_qp_param,
				u8 tx_affinity)
{
	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
	int modify_rq = !!qp->rq.wqe_cnt;
	int modify_sq = !!qp->sq.wqe_cnt;
	int rq_state;
	int sq_state;
	int err;

	switch (raw_qp_param->operation) {
	case MLX5_CMD_OP_RST2INIT_QP:
		rq_state = MLX5_RQC_STATE_RDY;
		sq_state = MLX5_SQC_STATE_RDY;
		break;
	case MLX5_CMD_OP_2ERR_QP:
		rq_state = MLX5_RQC_STATE_ERR;
		sq_state = MLX5_SQC_STATE_ERR;
		break;
	case MLX5_CMD_OP_2RST_QP:
		rq_state = MLX5_RQC_STATE_RST;
		sq_state = MLX5_SQC_STATE_RST;
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
		if (raw_qp_param->set_mask ==
		    MLX5_RAW_QP_RATE_LIMIT) {
			modify_rq = 0;
			sq_state = sq->state;
		} else {
			return raw_qp_param->set_mask ? -EINVAL : 0;
		}
		break;
	case MLX5_CMD_OP_INIT2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
		if (raw_qp_param->set_mask)
			return -EINVAL;
		else
			return 0;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	if (modify_rq) {
		err = modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param);
		if (err)
			return err;
	}

	if (modify_sq) {
		if (tx_affinity) {
			err = modify_raw_packet_tx_affinity(dev->mdev, sq,
							    tx_affinity);
			if (err)
				return err;
		}

		return modify_raw_packet_qp_sq(dev->mdev, sq, sq_state, raw_qp_param);
	}

	return 0;
}
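/*
 * __mlx5_ib_modify_qp() maps the (current state, new state) pair to a
 * firmware MODIFY_QP opcode through the optab[] table below, masks the
 * optional-parameter bits against opt_mask[], and then issues either the
 * regular MODIFY_QP command or its raw packet QP equivalent.
 */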
2948 static int __mlx5_ib_modify_qp(struct ib_qp
*ibqp
,
2949 const struct ib_qp_attr
*attr
, int attr_mask
,
2950 enum ib_qp_state cur_state
, enum ib_qp_state new_state
,
2951 const struct mlx5_ib_modify_qp
*ucmd
)
2953 static const u16 optab
[MLX5_QP_NUM_STATE
][MLX5_QP_NUM_STATE
] = {
2954 [MLX5_QP_STATE_RST
] = {
2955 [MLX5_QP_STATE_RST
] = MLX5_CMD_OP_2RST_QP
,
2956 [MLX5_QP_STATE_ERR
] = MLX5_CMD_OP_2ERR_QP
,
2957 [MLX5_QP_STATE_INIT
] = MLX5_CMD_OP_RST2INIT_QP
,
2959 [MLX5_QP_STATE_INIT
] = {
2960 [MLX5_QP_STATE_RST
] = MLX5_CMD_OP_2RST_QP
,
2961 [MLX5_QP_STATE_ERR
] = MLX5_CMD_OP_2ERR_QP
,
2962 [MLX5_QP_STATE_INIT
] = MLX5_CMD_OP_INIT2INIT_QP
,
2963 [MLX5_QP_STATE_RTR
] = MLX5_CMD_OP_INIT2RTR_QP
,
2965 [MLX5_QP_STATE_RTR
] = {
2966 [MLX5_QP_STATE_RST
] = MLX5_CMD_OP_2RST_QP
,
2967 [MLX5_QP_STATE_ERR
] = MLX5_CMD_OP_2ERR_QP
,
2968 [MLX5_QP_STATE_RTS
] = MLX5_CMD_OP_RTR2RTS_QP
,
2970 [MLX5_QP_STATE_RTS
] = {
2971 [MLX5_QP_STATE_RST
] = MLX5_CMD_OP_2RST_QP
,
2972 [MLX5_QP_STATE_ERR
] = MLX5_CMD_OP_2ERR_QP
,
2973 [MLX5_QP_STATE_RTS
] = MLX5_CMD_OP_RTS2RTS_QP
,
2975 [MLX5_QP_STATE_SQD
] = {
2976 [MLX5_QP_STATE_RST
] = MLX5_CMD_OP_2RST_QP
,
2977 [MLX5_QP_STATE_ERR
] = MLX5_CMD_OP_2ERR_QP
,
2979 [MLX5_QP_STATE_SQER
] = {
2980 [MLX5_QP_STATE_RST
] = MLX5_CMD_OP_2RST_QP
,
2981 [MLX5_QP_STATE_ERR
] = MLX5_CMD_OP_2ERR_QP
,
2982 [MLX5_QP_STATE_RTS
] = MLX5_CMD_OP_SQERR2RTS_QP
,
2984 [MLX5_QP_STATE_ERR
] = {
2985 [MLX5_QP_STATE_RST
] = MLX5_CMD_OP_2RST_QP
,
2986 [MLX5_QP_STATE_ERR
] = MLX5_CMD_OP_2ERR_QP
,
2990 struct mlx5_ib_dev
*dev
= to_mdev(ibqp
->device
);
2991 struct mlx5_ib_qp
*qp
= to_mqp(ibqp
);
2992 struct mlx5_ib_qp_base
*base
= &qp
->trans_qp
.base
;
2993 struct mlx5_ib_cq
*send_cq
, *recv_cq
;
2994 struct mlx5_qp_context
*context
;
2995 struct mlx5_ib_pd
*pd
;
2996 struct mlx5_ib_port
*mibport
= NULL
;
2997 enum mlx5_qp_state mlx5_cur
, mlx5_new
;
2998 enum mlx5_qp_optpar optpar
;
3004 mlx5_st
= to_mlx5_st(ibqp
->qp_type
== IB_QPT_DRIVER
?
3005 qp
->qp_sub_type
: ibqp
->qp_type
);
3009 context
= kzalloc(sizeof(*context
), GFP_KERNEL
);
3013 context
->flags
= cpu_to_be32(mlx5_st
<< 16);
3015 if (!(attr_mask
& IB_QP_PATH_MIG_STATE
)) {
3016 context
->flags
|= cpu_to_be32(MLX5_QP_PM_MIGRATED
<< 11);
3018 switch (attr
->path_mig_state
) {
3019 case IB_MIG_MIGRATED
:
3020 context
->flags
|= cpu_to_be32(MLX5_QP_PM_MIGRATED
<< 11);
3023 context
->flags
|= cpu_to_be32(MLX5_QP_PM_REARM
<< 11);
3026 context
->flags
|= cpu_to_be32(MLX5_QP_PM_ARMED
<< 11);
3031 if ((cur_state
== IB_QPS_RESET
) && (new_state
== IB_QPS_INIT
)) {
3032 if ((ibqp
->qp_type
== IB_QPT_RC
) ||
3033 (ibqp
->qp_type
== IB_QPT_UD
&&
3034 !(qp
->flags
& MLX5_IB_QP_SQPN_QP1
)) ||
3035 (ibqp
->qp_type
== IB_QPT_UC
) ||
3036 (ibqp
->qp_type
== IB_QPT_RAW_PACKET
) ||
3037 (ibqp
->qp_type
== IB_QPT_XRC_INI
) ||
3038 (ibqp
->qp_type
== IB_QPT_XRC_TGT
)) {
3039 if (mlx5_lag_is_active(dev
->mdev
)) {
3040 u8 p
= mlx5_core_native_port_num(dev
->mdev
);
3041 tx_affinity
= (unsigned int)atomic_add_return(1,
3042 &dev
->roce
[p
].next_port
) %
3044 context
->flags
|= cpu_to_be32(tx_affinity
<< 24);
3049 if (is_sqp(ibqp
->qp_type
)) {
3050 context
->mtu_msgmax
= (IB_MTU_256
<< 5) | 8;
3051 } else if ((ibqp
->qp_type
== IB_QPT_UD
&&
3052 !(qp
->flags
& MLX5_IB_QP_UNDERLAY
)) ||
3053 ibqp
->qp_type
== MLX5_IB_QPT_REG_UMR
) {
3054 context
->mtu_msgmax
= (IB_MTU_4096
<< 5) | 12;
3055 } else if (attr_mask
& IB_QP_PATH_MTU
) {
3056 if (attr
->path_mtu
< IB_MTU_256
||
3057 attr
->path_mtu
> IB_MTU_4096
) {
3058 mlx5_ib_warn(dev
, "invalid mtu %d\n", attr
->path_mtu
);
3062 context
->mtu_msgmax
= (attr
->path_mtu
<< 5) |
3063 (u8
)MLX5_CAP_GEN(dev
->mdev
, log_max_msg
);
3066 if (attr_mask
& IB_QP_DEST_QPN
)
3067 context
->log_pg_sz_remote_qpn
= cpu_to_be32(attr
->dest_qp_num
);
3069 if (attr_mask
& IB_QP_PKEY_INDEX
)
3070 context
->pri_path
.pkey_index
= cpu_to_be16(attr
->pkey_index
);
3072 /* todo implement counter_index functionality */
3074 if (is_sqp(ibqp
->qp_type
))
3075 context
->pri_path
.port
= qp
->port
;
3077 if (attr_mask
& IB_QP_PORT
)
3078 context
->pri_path
.port
= attr
->port_num
;
3080 if (attr_mask
& IB_QP_AV
) {
3081 err
= mlx5_set_path(dev
, qp
, &attr
->ah_attr
, &context
->pri_path
,
3082 attr_mask
& IB_QP_PORT
? attr
->port_num
: qp
->port
,
3083 attr_mask
, 0, attr
, false);
3088 if (attr_mask
& IB_QP_TIMEOUT
)
3089 context
->pri_path
.ackto_lt
|= attr
->timeout
<< 3;
3091 if (attr_mask
& IB_QP_ALT_PATH
) {
3092 err
= mlx5_set_path(dev
, qp
, &attr
->alt_ah_attr
,
3095 attr_mask
| IB_QP_PKEY_INDEX
| IB_QP_TIMEOUT
,
3102 get_cqs(qp
->ibqp
.qp_type
, qp
->ibqp
.send_cq
, qp
->ibqp
.recv_cq
,
3103 &send_cq
, &recv_cq
);
3105 context
->flags_pd
= cpu_to_be32(pd
? pd
->pdn
: to_mpd(dev
->devr
.p0
)->pdn
);
3106 context
->cqn_send
= send_cq
? cpu_to_be32(send_cq
->mcq
.cqn
) : 0;
3107 context
->cqn_recv
= recv_cq
? cpu_to_be32(recv_cq
->mcq
.cqn
) : 0;
3108 context
->params1
= cpu_to_be32(MLX5_IB_ACK_REQ_FREQ
<< 28);
3110 if (attr_mask
& IB_QP_RNR_RETRY
)
3111 context
->params1
|= cpu_to_be32(attr
->rnr_retry
<< 13);
3113 if (attr_mask
& IB_QP_RETRY_CNT
)
3114 context
->params1
|= cpu_to_be32(attr
->retry_cnt
<< 16);
3116 if (attr_mask
& IB_QP_MAX_QP_RD_ATOMIC
) {
3117 if (attr
->max_rd_atomic
)
3119 cpu_to_be32(fls(attr
->max_rd_atomic
- 1) << 21);
3122 if (attr_mask
& IB_QP_SQ_PSN
)
3123 context
->next_send_psn
= cpu_to_be32(attr
->sq_psn
);
3125 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
) {
3126 if (attr
->max_dest_rd_atomic
)
3128 cpu_to_be32(fls(attr
->max_dest_rd_atomic
- 1) << 21);
3131 if (attr_mask
& (IB_QP_ACCESS_FLAGS
| IB_QP_MAX_DEST_RD_ATOMIC
))
3132 context
->params2
|= to_mlx5_access_flags(qp
, attr
, attr_mask
);
3134 if (attr_mask
& IB_QP_MIN_RNR_TIMER
)
3135 context
->rnr_nextrecvpsn
|= cpu_to_be32(attr
->min_rnr_timer
<< 24);
3137 if (attr_mask
& IB_QP_RQ_PSN
)
3138 context
->rnr_nextrecvpsn
|= cpu_to_be32(attr
->rq_psn
);
3140 if (attr_mask
& IB_QP_QKEY
)
3141 context
->qkey
= cpu_to_be32(attr
->qkey
);
3143 if (qp
->rq
.wqe_cnt
&& cur_state
== IB_QPS_RESET
&& new_state
== IB_QPS_INIT
)
3144 context
->db_rec_addr
= cpu_to_be64(qp
->db
.dma
);
3146 if (cur_state
== IB_QPS_RESET
&& new_state
== IB_QPS_INIT
) {
3147 u8 port_num
= (attr_mask
& IB_QP_PORT
? attr
->port_num
:
3150 /* Underlay port should be used - index 0 function per port */
3151 if (qp
->flags
& MLX5_IB_QP_UNDERLAY
)
3154 mibport
= &dev
->port
[port_num
];
3155 context
->qp_counter_set_usr_page
|=
3156 cpu_to_be32((u32
)(mibport
->cnts
.set_id
) << 24);
3159 if (!ibqp
->uobject
&& cur_state
== IB_QPS_RESET
&& new_state
== IB_QPS_INIT
)
3160 context
->sq_crq_size
|= cpu_to_be16(1 << 4);
3162 if (qp
->flags
& MLX5_IB_QP_SQPN_QP1
)
3163 context
->deth_sqpn
= cpu_to_be32(1);
3165 mlx5_cur
= to_mlx5_state(cur_state
);
3166 mlx5_new
= to_mlx5_state(new_state
);
3168 if (mlx5_cur
>= MLX5_QP_NUM_STATE
|| mlx5_new
>= MLX5_QP_NUM_STATE
||
3169 !optab
[mlx5_cur
][mlx5_new
]) {
3174 op
= optab
[mlx5_cur
][mlx5_new
];
3175 optpar
= ib_mask_to_mlx5_opt(attr_mask
);
3176 optpar
&= opt_mask
[mlx5_cur
][mlx5_new
][mlx5_st
];
3178 if (qp
->ibqp
.qp_type
== IB_QPT_RAW_PACKET
||
3179 qp
->flags
& MLX5_IB_QP_UNDERLAY
) {
3180 struct mlx5_modify_raw_qp_param raw_qp_param
= {};
3182 raw_qp_param
.operation
= op
;
3183 if (cur_state
== IB_QPS_RESET
&& new_state
== IB_QPS_INIT
) {
3184 raw_qp_param
.rq_q_ctr_id
= mibport
->cnts
.set_id
;
3185 raw_qp_param
.set_mask
|= MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID
;
3188 if (attr_mask
& IB_QP_RATE_LIMIT
) {
3189 raw_qp_param
.rl
.rate
= attr
->rate_limit
;
3191 if (ucmd
->burst_info
.max_burst_sz
) {
3192 if (attr
->rate_limit
&&
3193 MLX5_CAP_QOS(dev
->mdev
, packet_pacing_burst_bound
)) {
3194 raw_qp_param
.rl
.max_burst_sz
=
3195 ucmd
->burst_info
.max_burst_sz
;
3202 if (ucmd
->burst_info
.typical_pkt_sz
) {
3203 if (attr
->rate_limit
&&
3204 MLX5_CAP_QOS(dev
->mdev
, packet_pacing_typical_size
)) {
3205 raw_qp_param
.rl
.typical_pkt_sz
=
3206 ucmd
->burst_info
.typical_pkt_sz
;
3213 raw_qp_param
.set_mask
|= MLX5_RAW_QP_RATE_LIMIT
;
3216 err
= modify_raw_packet_qp(dev
, qp
, &raw_qp_param
, tx_affinity
);
3218 err
= mlx5_core_qp_modify(dev
->mdev
, op
, optpar
, context
,
3225 qp
->state
= new_state
;
3227 if (attr_mask
& IB_QP_ACCESS_FLAGS
)
3228 qp
->trans_qp
.atomic_rd_en
= attr
->qp_access_flags
;
3229 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
)
3230 qp
->trans_qp
.resp_depth
= attr
->max_dest_rd_atomic
;
3231 if (attr_mask
& IB_QP_PORT
)
3232 qp
->port
= attr
->port_num
;
3233 if (attr_mask
& IB_QP_ALT_PATH
)
3234 qp
->trans_qp
.alt_port
= attr
->alt_port_num
;
3237 * If we moved a kernel QP to RESET, clean up all old CQ
3238 * entries and reinitialize the QP.
3240 if (new_state
== IB_QPS_RESET
&&
3241 !ibqp
->uobject
&& ibqp
->qp_type
!= IB_QPT_XRC_TGT
) {
3242 mlx5_ib_cq_clean(recv_cq
, base
->mqp
.qpn
,
3243 ibqp
->srq
? to_msrq(ibqp
->srq
) : NULL
);
3244 if (send_cq
!= recv_cq
)
3245 mlx5_ib_cq_clean(send_cq
, base
->mqp
.qpn
, NULL
);
3251 qp
->sq
.cur_post
= 0;
3252 qp
->sq
.last_poll
= 0;
3253 qp
->db
.db
[MLX5_RCV_DBR
] = 0;
3254 qp
->db
.db
[MLX5_SND_DBR
] = 0;
3262 static inline bool is_valid_mask(int mask
, int req
, int opt
)
3264 if ((mask
& req
) != req
)
3267 if (mask
& ~(req
| opt
))
3273 /* check valid transition for driver QP types
3274 * for now the only QP type that this function supports is DCI
3276 static bool modify_dci_qp_is_ok(enum ib_qp_state cur_state
, enum ib_qp_state new_state
,
3277 enum ib_qp_attr_mask attr_mask
)
3279 int req
= IB_QP_STATE
;
3282 if (cur_state
== IB_QPS_RESET
&& new_state
== IB_QPS_INIT
) {
3283 req
|= IB_QP_PKEY_INDEX
| IB_QP_PORT
;
3284 return is_valid_mask(attr_mask
, req
, opt
);
3285 } else if (cur_state
== IB_QPS_INIT
&& new_state
== IB_QPS_INIT
) {
3286 opt
= IB_QP_PKEY_INDEX
| IB_QP_PORT
;
3287 return is_valid_mask(attr_mask
, req
, opt
);
3288 } else if (cur_state
== IB_QPS_INIT
&& new_state
== IB_QPS_RTR
) {
3289 req
|= IB_QP_PATH_MTU
;
3290 opt
= IB_QP_PKEY_INDEX
;
3291 return is_valid_mask(attr_mask
, req
, opt
);
3292 } else if (cur_state
== IB_QPS_RTR
&& new_state
== IB_QPS_RTS
) {
3293 req
|= IB_QP_TIMEOUT
| IB_QP_RETRY_CNT
| IB_QP_RNR_RETRY
|
3294 IB_QP_MAX_QP_RD_ATOMIC
| IB_QP_SQ_PSN
;
3295 opt
= IB_QP_MIN_RNR_TIMER
;
3296 return is_valid_mask(attr_mask
, req
, opt
);
3297 } else if (cur_state
== IB_QPS_RTS
&& new_state
== IB_QPS_RTS
) {
3298 opt
= IB_QP_MIN_RNR_TIMER
;
3299 return is_valid_mask(attr_mask
, req
, opt
);
3300 } else if (cur_state
!= IB_QPS_RESET
&& new_state
== IB_QPS_ERR
) {
3301 return is_valid_mask(attr_mask
, req
, opt
);
3306 /* mlx5_ib_modify_dct: modify a DCT QP
3307 * valid transitions are:
3308 * RESET to INIT: must set access_flags, pkey_index and port
3309 * INIT to RTR : must set min_rnr_timer, tclass, flow_label,
3310 * mtu, gid_index and hop_limit
3311 * Other transitions and attributes are illegal
3313 static int mlx5_ib_modify_dct(struct ib_qp
*ibqp
, struct ib_qp_attr
*attr
,
3314 int attr_mask
, struct ib_udata
*udata
)
3316 struct mlx5_ib_qp
*qp
= to_mqp(ibqp
);
3317 struct mlx5_ib_dev
*dev
= to_mdev(ibqp
->device
);
3318 enum ib_qp_state cur_state
, new_state
;
3320 int required
= IB_QP_STATE
;
3323 if (!(attr_mask
& IB_QP_STATE
))
3326 cur_state
= qp
->state
;
3327 new_state
= attr
->qp_state
;
3329 dctc
= MLX5_ADDR_OF(create_dct_in
, qp
->dct
.in
, dct_context_entry
);
3330 if (cur_state
== IB_QPS_RESET
&& new_state
== IB_QPS_INIT
) {
3331 required
|= IB_QP_ACCESS_FLAGS
| IB_QP_PKEY_INDEX
| IB_QP_PORT
;
3332 if (!is_valid_mask(attr_mask
, required
, 0))
3335 if (attr
->port_num
== 0 ||
3336 attr
->port_num
> MLX5_CAP_GEN(dev
->mdev
, num_ports
)) {
3337 mlx5_ib_dbg(dev
, "invalid port number %d. number of ports is %d\n",
3338 attr
->port_num
, dev
->num_ports
);
3341 if (attr
->qp_access_flags
& IB_ACCESS_REMOTE_READ
)
3342 MLX5_SET(dctc
, dctc
, rre
, 1);
3343 if (attr
->qp_access_flags
& IB_ACCESS_REMOTE_WRITE
)
3344 MLX5_SET(dctc
, dctc
, rwe
, 1);
3345 if (attr
->qp_access_flags
& IB_ACCESS_REMOTE_ATOMIC
) {
3346 if (!mlx5_ib_dc_atomic_is_supported(dev
))
3348 MLX5_SET(dctc
, dctc
, rae
, 1);
3349 MLX5_SET(dctc
, dctc
, atomic_mode
, MLX5_ATOMIC_MODE_DCT_CX
);
3351 MLX5_SET(dctc
, dctc
, pkey_index
, attr
->pkey_index
);
3352 MLX5_SET(dctc
, dctc
, port
, attr
->port_num
);
3353 MLX5_SET(dctc
, dctc
, counter_set_id
, dev
->port
[attr
->port_num
- 1].cnts
.set_id
);
3355 } else if (cur_state
== IB_QPS_INIT
&& new_state
== IB_QPS_RTR
) {
3356 struct mlx5_ib_modify_qp_resp resp
= {};
3357 u32 min_resp_len
= offsetof(typeof(resp
), dctn
) +
3360 if (udata
->outlen
< min_resp_len
)
3362 resp
.response_length
= min_resp_len
;
3364 required
|= IB_QP_MIN_RNR_TIMER
| IB_QP_AV
| IB_QP_PATH_MTU
;
3365 if (!is_valid_mask(attr_mask
, required
, 0))
3367 MLX5_SET(dctc
, dctc
, min_rnr_nak
, attr
->min_rnr_timer
);
3368 MLX5_SET(dctc
, dctc
, tclass
, attr
->ah_attr
.grh
.traffic_class
);
3369 MLX5_SET(dctc
, dctc
, flow_label
, attr
->ah_attr
.grh
.flow_label
);
3370 MLX5_SET(dctc
, dctc
, mtu
, attr
->path_mtu
);
3371 MLX5_SET(dctc
, dctc
, my_addr_index
, attr
->ah_attr
.grh
.sgid_index
);
3372 MLX5_SET(dctc
, dctc
, hop_limit
, attr
->ah_attr
.grh
.hop_limit
);
3374 err
= mlx5_core_create_dct(dev
->mdev
, &qp
->dct
.mdct
, qp
->dct
.in
,
3375 MLX5_ST_SZ_BYTES(create_dct_in
));
3378 resp
.dctn
= qp
->dct
.mdct
.mqp
.qpn
;
3379 err
= ib_copy_to_udata(udata
, &resp
, resp
.response_length
);
3381 mlx5_core_destroy_dct(dev
->mdev
, &qp
->dct
.mdct
);
3385 mlx5_ib_warn(dev
, "Modify DCT: Invalid transition from %d to %d\n", cur_state
, new_state
);
3389 qp
->state
= IB_QPS_ERR
;
3391 qp
->state
= new_state
;
3395 int mlx5_ib_modify_qp(struct ib_qp
*ibqp
, struct ib_qp_attr
*attr
,
3396 int attr_mask
, struct ib_udata
*udata
)
3398 struct mlx5_ib_dev
*dev
= to_mdev(ibqp
->device
);
3399 struct mlx5_ib_qp
*qp
= to_mqp(ibqp
);
3400 struct mlx5_ib_modify_qp ucmd
= {};
3401 enum ib_qp_type qp_type
;
3402 enum ib_qp_state cur_state
, new_state
;
3403 size_t required_cmd_sz
;
3406 enum rdma_link_layer ll
= IB_LINK_LAYER_UNSPECIFIED
;
3408 if (ibqp
->rwq_ind_tbl
)
3411 if (udata
&& udata
->inlen
) {
3412 required_cmd_sz
= offsetof(typeof(ucmd
), reserved
) +
3413 sizeof(ucmd
.reserved
);
3414 if (udata
->inlen
< required_cmd_sz
)
3417 if (udata
->inlen
> sizeof(ucmd
) &&
3418 !ib_is_udata_cleared(udata
, sizeof(ucmd
),
3419 udata
->inlen
- sizeof(ucmd
)))
3422 if (ib_copy_from_udata(&ucmd
, udata
,
3423 min(udata
->inlen
, sizeof(ucmd
))))
3426 if (ucmd
.comp_mask
||
3427 memchr_inv(&ucmd
.reserved
, 0, sizeof(ucmd
.reserved
)) ||
3428 memchr_inv(&ucmd
.burst_info
.reserved
, 0,
3429 sizeof(ucmd
.burst_info
.reserved
)))
3433 if (unlikely(ibqp
->qp_type
== IB_QPT_GSI
))
3434 return mlx5_ib_gsi_modify_qp(ibqp
, attr
, attr_mask
);
3436 if (ibqp
->qp_type
== IB_QPT_DRIVER
)
3437 qp_type
= qp
->qp_sub_type
;
3439 qp_type
= (unlikely(ibqp
->qp_type
== MLX5_IB_QPT_HW_GSI
)) ?
3440 IB_QPT_GSI
: ibqp
->qp_type
;
3442 if (qp_type
== MLX5_IB_QPT_DCT
)
3443 return mlx5_ib_modify_dct(ibqp
, attr
, attr_mask
, udata
);
3445 mutex_lock(&qp
->mutex
);
3447 cur_state
= attr_mask
& IB_QP_CUR_STATE
? attr
->cur_qp_state
: qp
->state
;
3448 new_state
= attr_mask
& IB_QP_STATE
? attr
->qp_state
: cur_state
;
3450 if (!(cur_state
== new_state
&& cur_state
== IB_QPS_RESET
)) {
3451 port
= attr_mask
& IB_QP_PORT
? attr
->port_num
: qp
->port
;
3452 ll
= dev
->ib_dev
.get_link_layer(&dev
->ib_dev
, port
);
3455 if (qp
->flags
& MLX5_IB_QP_UNDERLAY
) {
3456 if (attr_mask
& ~(IB_QP_STATE
| IB_QP_CUR_STATE
)) {
3457 mlx5_ib_dbg(dev
, "invalid attr_mask 0x%x when underlay QP is used\n",
3461 } else if (qp_type
!= MLX5_IB_QPT_REG_UMR
&&
3462 qp_type
!= MLX5_IB_QPT_DCI
&&
3463 !ib_modify_qp_is_ok(cur_state
, new_state
, qp_type
, attr_mask
, ll
)) {
3464 mlx5_ib_dbg(dev
, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
3465 cur_state
, new_state
, ibqp
->qp_type
, attr_mask
);
3467 } else if (qp_type
== MLX5_IB_QPT_DCI
&&
3468 !modify_dci_qp_is_ok(cur_state
, new_state
, attr_mask
)) {
3469 mlx5_ib_dbg(dev
, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
3470 cur_state
, new_state
, qp_type
, attr_mask
);
3474 if ((attr_mask
& IB_QP_PORT
) &&
3475 (attr
->port_num
== 0 ||
3476 attr
->port_num
> dev
->num_ports
)) {
3477 mlx5_ib_dbg(dev
, "invalid port number %d. number of ports is %d\n",
3478 attr
->port_num
, dev
->num_ports
);
3482 if (attr_mask
& IB_QP_PKEY_INDEX
) {
3483 port
= attr_mask
& IB_QP_PORT
? attr
->port_num
: qp
->port
;
3484 if (attr
->pkey_index
>=
3485 dev
->mdev
->port_caps
[port
- 1].pkey_table_len
) {
3486 mlx5_ib_dbg(dev
, "invalid pkey index %d\n",
3492 if (attr_mask
& IB_QP_MAX_QP_RD_ATOMIC
&&
3493 attr
->max_rd_atomic
>
3494 (1 << MLX5_CAP_GEN(dev
->mdev
, log_max_ra_res_qp
))) {
3495 mlx5_ib_dbg(dev
, "invalid max_rd_atomic value %d\n",
3496 attr
->max_rd_atomic
);
3500 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
&&
3501 attr
->max_dest_rd_atomic
>
3502 (1 << MLX5_CAP_GEN(dev
->mdev
, log_max_ra_req_qp
))) {
3503 mlx5_ib_dbg(dev
, "invalid max_dest_rd_atomic value %d\n",
3504 attr
->max_dest_rd_atomic
);
3508 if (cur_state
== new_state
&& cur_state
== IB_QPS_RESET
) {
3513 err
= __mlx5_ib_modify_qp(ibqp
, attr
, attr_mask
, cur_state
,
3517 mutex_unlock(&qp
->mutex
);
static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
{
	struct mlx5_ib_cq *cq;
	unsigned cur;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max_post))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max_post;
}

static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->raddr    = cpu_to_be64(remote_addr);
	rseg->rkey     = cpu_to_be32(rkey);
	rseg->reserved = 0;
}
3546 static void *set_eth_seg(struct mlx5_wqe_eth_seg
*eseg
,
3547 const struct ib_send_wr
*wr
, void *qend
,
3548 struct mlx5_ib_qp
*qp
, int *size
)
3552 memset(eseg
, 0, sizeof(struct mlx5_wqe_eth_seg
));
3554 if (wr
->send_flags
& IB_SEND_IP_CSUM
)
3555 eseg
->cs_flags
= MLX5_ETH_WQE_L3_CSUM
|
3556 MLX5_ETH_WQE_L4_CSUM
;
3558 seg
+= sizeof(struct mlx5_wqe_eth_seg
);
3559 *size
+= sizeof(struct mlx5_wqe_eth_seg
) / 16;
3561 if (wr
->opcode
== IB_WR_LSO
) {
3562 struct ib_ud_wr
*ud_wr
= container_of(wr
, struct ib_ud_wr
, wr
);
3563 int size_of_inl_hdr_start
= sizeof(eseg
->inline_hdr
.start
);
3564 u64 left
, leftlen
, copysz
;
3565 void *pdata
= ud_wr
->header
;
3568 eseg
->mss
= cpu_to_be16(ud_wr
->mss
);
3569 eseg
->inline_hdr
.sz
= cpu_to_be16(left
);
3572 * check if there is space till the end of queue, if yes,
3573 * copy all in one shot, otherwise copy till the end of queue,
3574 * rollback and than the copy the left
3576 leftlen
= qend
- (void *)eseg
->inline_hdr
.start
;
3577 copysz
= min_t(u64
, leftlen
, left
);
3579 memcpy(seg
- size_of_inl_hdr_start
, pdata
, copysz
);
3581 if (likely(copysz
> size_of_inl_hdr_start
)) {
3582 seg
+= ALIGN(copysz
- size_of_inl_hdr_start
, 16);
3583 *size
+= ALIGN(copysz
- size_of_inl_hdr_start
, 16) / 16;
3586 if (unlikely(copysz
< left
)) { /* the last wqe in the queue */
3587 seg
= mlx5_get_send_wqe(qp
, 0);
3590 memcpy(seg
, pdata
, left
);
3591 seg
+= ALIGN(left
, 16);
3592 *size
+= ALIGN(left
, 16) / 16;
3599 static void set_datagram_seg(struct mlx5_wqe_datagram_seg
*dseg
,
3600 const struct ib_send_wr
*wr
)
3602 memcpy(&dseg
->av
, &to_mah(ud_wr(wr
)->ah
)->av
, sizeof(struct mlx5_av
));
3603 dseg
->av
.dqp_dct
= cpu_to_be32(ud_wr(wr
)->remote_qpn
| MLX5_EXTENDED_UD_AV
);
3604 dseg
->av
.key
.qkey
.qkey
= cpu_to_be32(ud_wr(wr
)->remote_qkey
);
3607 static void set_data_ptr_seg(struct mlx5_wqe_data_seg
*dseg
, struct ib_sge
*sg
)
3609 dseg
->byte_count
= cpu_to_be32(sg
->length
);
3610 dseg
->lkey
= cpu_to_be32(sg
->lkey
);
3611 dseg
->addr
= cpu_to_be64(sg
->addr
);
3614 static u64
get_xlt_octo(u64 bytes
)
3616 return ALIGN(bytes
, MLX5_IB_UMR_XLT_ALIGNMENT
) /
3617 MLX5_IB_UMR_OCTOWORD
;
3620 static __be64
frwr_mkey_mask(void)
3624 result
= MLX5_MKEY_MASK_LEN
|
3625 MLX5_MKEY_MASK_PAGE_SIZE
|
3626 MLX5_MKEY_MASK_START_ADDR
|
3627 MLX5_MKEY_MASK_EN_RINVAL
|
3628 MLX5_MKEY_MASK_KEY
|
3634 MLX5_MKEY_MASK_SMALL_FENCE
|
3635 MLX5_MKEY_MASK_FREE
;
3637 return cpu_to_be64(result
);
3640 static __be64
sig_mkey_mask(void)
3644 result
= MLX5_MKEY_MASK_LEN
|
3645 MLX5_MKEY_MASK_PAGE_SIZE
|
3646 MLX5_MKEY_MASK_START_ADDR
|
3647 MLX5_MKEY_MASK_EN_SIGERR
|
3648 MLX5_MKEY_MASK_EN_RINVAL
|
3649 MLX5_MKEY_MASK_KEY
|
3654 MLX5_MKEY_MASK_SMALL_FENCE
|
3655 MLX5_MKEY_MASK_FREE
|
3656 MLX5_MKEY_MASK_BSF_EN
;
3658 return cpu_to_be64(result
);
3661 static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg
*umr
,
3662 struct mlx5_ib_mr
*mr
, bool umr_inline
)
3664 int size
= mr
->ndescs
* mr
->desc_size
;
3666 memset(umr
, 0, sizeof(*umr
));
3668 umr
->flags
= MLX5_UMR_CHECK_NOT_FREE
;
3670 umr
->flags
|= MLX5_UMR_INLINE
;
3671 umr
->xlt_octowords
= cpu_to_be16(get_xlt_octo(size
));
3672 umr
->mkey_mask
= frwr_mkey_mask();
3675 static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg
*umr
)
3677 memset(umr
, 0, sizeof(*umr
));
3678 umr
->mkey_mask
= cpu_to_be64(MLX5_MKEY_MASK_FREE
);
3679 umr
->flags
= MLX5_UMR_INLINE
;
3682 static __be64
get_umr_enable_mr_mask(void)
3686 result
= MLX5_MKEY_MASK_KEY
|
3687 MLX5_MKEY_MASK_FREE
;
3689 return cpu_to_be64(result
);
3692 static __be64
get_umr_disable_mr_mask(void)
3696 result
= MLX5_MKEY_MASK_FREE
;
3698 return cpu_to_be64(result
);
3701 static __be64
get_umr_update_translation_mask(void)
3705 result
= MLX5_MKEY_MASK_LEN
|
3706 MLX5_MKEY_MASK_PAGE_SIZE
|
3707 MLX5_MKEY_MASK_START_ADDR
;
3709 return cpu_to_be64(result
);
3712 static __be64
get_umr_update_access_mask(int atomic
)
3716 result
= MLX5_MKEY_MASK_LR
|
3722 result
|= MLX5_MKEY_MASK_A
;
3724 return cpu_to_be64(result
);
3727 static __be64
get_umr_update_pd_mask(void)
3731 result
= MLX5_MKEY_MASK_PD
;
3733 return cpu_to_be64(result
);
3736 static int umr_check_mkey_mask(struct mlx5_ib_dev
*dev
, u64 mask
)
3738 if ((mask
& MLX5_MKEY_MASK_PAGE_SIZE
&&
3739 MLX5_CAP_GEN(dev
->mdev
, umr_modify_entity_size_disabled
)) ||
3740 (mask
& MLX5_MKEY_MASK_A
&&
3741 MLX5_CAP_GEN(dev
->mdev
, umr_modify_atomic_disabled
)))
3746 static int set_reg_umr_segment(struct mlx5_ib_dev
*dev
,
3747 struct mlx5_wqe_umr_ctrl_seg
*umr
,
3748 const struct ib_send_wr
*wr
, int atomic
)
3750 const struct mlx5_umr_wr
*umrwr
= umr_wr(wr
);
3752 memset(umr
, 0, sizeof(*umr
));
3754 if (wr
->send_flags
& MLX5_IB_SEND_UMR_FAIL_IF_FREE
)
3755 umr
->flags
= MLX5_UMR_CHECK_FREE
; /* fail if free */
3757 umr
->flags
= MLX5_UMR_CHECK_NOT_FREE
; /* fail if not free */
3759 umr
->xlt_octowords
= cpu_to_be16(get_xlt_octo(umrwr
->xlt_size
));
3760 if (wr
->send_flags
& MLX5_IB_SEND_UMR_UPDATE_XLT
) {
3761 u64 offset
= get_xlt_octo(umrwr
->offset
);
3763 umr
->xlt_offset
= cpu_to_be16(offset
& 0xffff);
3764 umr
->xlt_offset_47_16
= cpu_to_be32(offset
>> 16);
3765 umr
->flags
|= MLX5_UMR_TRANSLATION_OFFSET_EN
;
3767 if (wr
->send_flags
& MLX5_IB_SEND_UMR_UPDATE_TRANSLATION
)
3768 umr
->mkey_mask
|= get_umr_update_translation_mask();
3769 if (wr
->send_flags
& MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS
) {
3770 umr
->mkey_mask
|= get_umr_update_access_mask(atomic
);
3771 umr
->mkey_mask
|= get_umr_update_pd_mask();
3773 if (wr
->send_flags
& MLX5_IB_SEND_UMR_ENABLE_MR
)
3774 umr
->mkey_mask
|= get_umr_enable_mr_mask();
3775 if (wr
->send_flags
& MLX5_IB_SEND_UMR_DISABLE_MR
)
3776 umr
->mkey_mask
|= get_umr_disable_mr_mask();
3779 umr
->flags
|= MLX5_UMR_INLINE
;
3781 return umr_check_mkey_mask(dev
, be64_to_cpu(umr
->mkey_mask
));
3784 static u8
get_umr_flags(int acc
)
3786 return (acc
& IB_ACCESS_REMOTE_ATOMIC
? MLX5_PERM_ATOMIC
: 0) |
3787 (acc
& IB_ACCESS_REMOTE_WRITE
? MLX5_PERM_REMOTE_WRITE
: 0) |
3788 (acc
& IB_ACCESS_REMOTE_READ
? MLX5_PERM_REMOTE_READ
: 0) |
3789 (acc
& IB_ACCESS_LOCAL_WRITE
? MLX5_PERM_LOCAL_WRITE
: 0) |
3790 MLX5_PERM_LOCAL_READ
| MLX5_PERM_UMR_EN
;
3793 static void set_reg_mkey_seg(struct mlx5_mkey_seg
*seg
,
3794 struct mlx5_ib_mr
*mr
,
3795 u32 key
, int access
)
3797 int ndescs
= ALIGN(mr
->ndescs
, 8) >> 1;
3799 memset(seg
, 0, sizeof(*seg
));
3801 if (mr
->access_mode
== MLX5_MKC_ACCESS_MODE_MTT
)
3802 seg
->log2_page_size
= ilog2(mr
->ibmr
.page_size
);
3803 else if (mr
->access_mode
== MLX5_MKC_ACCESS_MODE_KLMS
)
3804 /* KLMs take twice the size of MTTs */
3807 seg
->flags
= get_umr_flags(access
) | mr
->access_mode
;
3808 seg
->qpn_mkey7_0
= cpu_to_be32((key
& 0xff) | 0xffffff00);
3809 seg
->flags_pd
= cpu_to_be32(MLX5_MKEY_REMOTE_INVAL
);
3810 seg
->start_addr
= cpu_to_be64(mr
->ibmr
.iova
);
3811 seg
->len
= cpu_to_be64(mr
->ibmr
.length
);
3812 seg
->xlt_oct_size
= cpu_to_be32(ndescs
);
3815 static void set_linv_mkey_seg(struct mlx5_mkey_seg
*seg
)
3817 memset(seg
, 0, sizeof(*seg
));
3818 seg
->status
= MLX5_MKEY_STATUS_FREE
;
3821 static void set_reg_mkey_segment(struct mlx5_mkey_seg
*seg
,
3822 const struct ib_send_wr
*wr
)
3824 const struct mlx5_umr_wr
*umrwr
= umr_wr(wr
);
3826 memset(seg
, 0, sizeof(*seg
));
3827 if (wr
->send_flags
& MLX5_IB_SEND_UMR_DISABLE_MR
)
3828 seg
->status
= MLX5_MKEY_STATUS_FREE
;
3830 seg
->flags
= convert_access(umrwr
->access_flags
);
3832 seg
->flags_pd
= cpu_to_be32(to_mpd(umrwr
->pd
)->pdn
);
3833 if (wr
->send_flags
& MLX5_IB_SEND_UMR_UPDATE_TRANSLATION
&&
3835 seg
->flags_pd
|= cpu_to_be32(MLX5_MKEY_LEN64
);
3837 seg
->start_addr
= cpu_to_be64(umrwr
->virt_addr
);
3838 seg
->len
= cpu_to_be64(umrwr
->length
);
3839 seg
->log2_page_size
= umrwr
->page_shift
;
3840 seg
->qpn_mkey7_0
= cpu_to_be32(0xffffff00 |
3841 mlx5_mkey_variant(umrwr
->mkey
));
3844 static void set_reg_data_seg(struct mlx5_wqe_data_seg
*dseg
,
3845 struct mlx5_ib_mr
*mr
,
3846 struct mlx5_ib_pd
*pd
)
3848 int bcount
= mr
->desc_size
* mr
->ndescs
;
3850 dseg
->addr
= cpu_to_be64(mr
->desc_map
);
3851 dseg
->byte_count
= cpu_to_be32(ALIGN(bcount
, 64));
3852 dseg
->lkey
= cpu_to_be32(pd
->ibpd
.local_dma_lkey
);
3855 static void set_reg_umr_inline_seg(void *seg
, struct mlx5_ib_qp
*qp
,
3856 struct mlx5_ib_mr
*mr
, int mr_list_size
)
3858 void *qend
= qp
->sq
.qend
;
3859 void *addr
= mr
->descs
;
3862 if (unlikely(seg
+ mr_list_size
> qend
)) {
3864 memcpy(seg
, addr
, copy
);
3866 mr_list_size
-= copy
;
3867 seg
= mlx5_get_send_wqe(qp
, 0);
3869 memcpy(seg
, addr
, mr_list_size
);
3870 seg
+= mr_list_size
;
static __be32 send_ieth(const struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return wr->ex.imm_data;

	case IB_WR_SEND_WITH_INV:
		return cpu_to_be32(wr->ex.invalidate_rkey);

	default:
		return 0;
	}
}

static u8 calc_sig(void *wqe, int size)
{
	u8 *p = wqe;
	u8 res = 0;
	int i;

	for (i = 0; i < size; i++)
		res ^= p[i];

	return ~res;
}

static u8 wq_sig(void *wqe)
{
	return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
}
3905 static int set_data_inl_seg(struct mlx5_ib_qp
*qp
, const struct ib_send_wr
*wr
,
3908 struct mlx5_wqe_inline_seg
*seg
;
3909 void *qend
= qp
->sq
.qend
;
3917 wqe
+= sizeof(*seg
);
3918 for (i
= 0; i
< wr
->num_sge
; i
++) {
3919 addr
= (void *)(unsigned long)(wr
->sg_list
[i
].addr
);
3920 len
= wr
->sg_list
[i
].length
;
3923 if (unlikely(inl
> qp
->max_inline_data
))
3926 if (unlikely(wqe
+ len
> qend
)) {
3928 memcpy(wqe
, addr
, copy
);
3931 wqe
= mlx5_get_send_wqe(qp
, 0);
3933 memcpy(wqe
, addr
, len
);
3937 seg
->byte_count
= cpu_to_be32(inl
| MLX5_INLINE_SEG
);
3939 *sz
= ALIGN(inl
+ sizeof(seg
->byte_count
), 16) / 16;
3944 static u16
prot_field_size(enum ib_signature_type type
)
3947 case IB_SIG_TYPE_T10_DIF
:
3948 return MLX5_DIF_SIZE
;
3954 static u8
bs_selector(int block_size
)
3956 switch (block_size
) {
3957 case 512: return 0x1;
3958 case 520: return 0x2;
3959 case 4096: return 0x3;
3960 case 4160: return 0x4;
3961 case 1073741824: return 0x5;
3966 static void mlx5_fill_inl_bsf(struct ib_sig_domain
*domain
,
3967 struct mlx5_bsf_inl
*inl
)
3969 /* Valid inline section and allow BSF refresh */
3970 inl
->vld_refresh
= cpu_to_be16(MLX5_BSF_INL_VALID
|
3971 MLX5_BSF_REFRESH_DIF
);
3972 inl
->dif_apptag
= cpu_to_be16(domain
->sig
.dif
.app_tag
);
3973 inl
->dif_reftag
= cpu_to_be32(domain
->sig
.dif
.ref_tag
);
3974 /* repeating block */
3975 inl
->rp_inv_seed
= MLX5_BSF_REPEAT_BLOCK
;
3976 inl
->sig_type
= domain
->sig
.dif
.bg_type
== IB_T10DIF_CRC
?
3977 MLX5_DIF_CRC
: MLX5_DIF_IPCS
;
3979 if (domain
->sig
.dif
.ref_remap
)
3980 inl
->dif_inc_ref_guard_check
|= MLX5_BSF_INC_REFTAG
;
3982 if (domain
->sig
.dif
.app_escape
) {
3983 if (domain
->sig
.dif
.ref_escape
)
3984 inl
->dif_inc_ref_guard_check
|= MLX5_BSF_APPREF_ESCAPE
;
3986 inl
->dif_inc_ref_guard_check
|= MLX5_BSF_APPTAG_ESCAPE
;
3989 inl
->dif_app_bitmask_check
=
3990 cpu_to_be16(domain
->sig
.dif
.apptag_check_mask
);
3993 static int mlx5_set_bsf(struct ib_mr
*sig_mr
,
3994 struct ib_sig_attrs
*sig_attrs
,
3995 struct mlx5_bsf
*bsf
, u32 data_size
)
3997 struct mlx5_core_sig_ctx
*msig
= to_mmr(sig_mr
)->sig
;
3998 struct mlx5_bsf_basic
*basic
= &bsf
->basic
;
3999 struct ib_sig_domain
*mem
= &sig_attrs
->mem
;
4000 struct ib_sig_domain
*wire
= &sig_attrs
->wire
;
4002 memset(bsf
, 0, sizeof(*bsf
));
4004 /* Basic + Extended + Inline */
4005 basic
->bsf_size_sbs
= 1 << 7;
4006 /* Input domain check byte mask */
4007 basic
->check_byte_mask
= sig_attrs
->check_mask
;
4008 basic
->raw_data_size
= cpu_to_be32(data_size
);
4011 switch (sig_attrs
->mem
.sig_type
) {
4012 case IB_SIG_TYPE_NONE
:
4014 case IB_SIG_TYPE_T10_DIF
:
4015 basic
->mem
.bs_selector
= bs_selector(mem
->sig
.dif
.pi_interval
);
4016 basic
->m_bfs_psv
= cpu_to_be32(msig
->psv_memory
.psv_idx
);
4017 mlx5_fill_inl_bsf(mem
, &bsf
->m_inl
);
4024 switch (sig_attrs
->wire
.sig_type
) {
4025 case IB_SIG_TYPE_NONE
:
4027 case IB_SIG_TYPE_T10_DIF
:
4028 if (mem
->sig
.dif
.pi_interval
== wire
->sig
.dif
.pi_interval
&&
4029 mem
->sig_type
== wire
->sig_type
) {
4030 /* Same block structure */
4031 basic
->bsf_size_sbs
|= 1 << 4;
4032 if (mem
->sig
.dif
.bg_type
== wire
->sig
.dif
.bg_type
)
4033 basic
->wire
.copy_byte_mask
|= MLX5_CPY_GRD_MASK
;
4034 if (mem
->sig
.dif
.app_tag
== wire
->sig
.dif
.app_tag
)
4035 basic
->wire
.copy_byte_mask
|= MLX5_CPY_APP_MASK
;
4036 if (mem
->sig
.dif
.ref_tag
== wire
->sig
.dif
.ref_tag
)
4037 basic
->wire
.copy_byte_mask
|= MLX5_CPY_REF_MASK
;
4039 basic
->wire
.bs_selector
= bs_selector(wire
->sig
.dif
.pi_interval
);
4041 basic
->w_bfs_psv
= cpu_to_be32(msig
->psv_wire
.psv_idx
);
4042 mlx5_fill_inl_bsf(wire
, &bsf
->w_inl
);
4051 static int set_sig_data_segment(const struct ib_sig_handover_wr
*wr
,
4052 struct mlx5_ib_qp
*qp
, void **seg
, int *size
)
4054 struct ib_sig_attrs
*sig_attrs
= wr
->sig_attrs
;
4055 struct ib_mr
*sig_mr
= wr
->sig_mr
;
4056 struct mlx5_bsf
*bsf
;
4057 u32 data_len
= wr
->wr
.sg_list
->length
;
4058 u32 data_key
= wr
->wr
.sg_list
->lkey
;
4059 u64 data_va
= wr
->wr
.sg_list
->addr
;
4064 (data_key
== wr
->prot
->lkey
&&
4065 data_va
== wr
->prot
->addr
&&
4066 data_len
== wr
->prot
->length
)) {
4068 * Source domain doesn't contain signature information
4069 * or data and protection are interleaved in memory.
4070 * So need construct:
4071 * ------------------
4073 * ------------------
4075 * ------------------
4077 struct mlx5_klm
*data_klm
= *seg
;
4079 data_klm
->bcount
= cpu_to_be32(data_len
);
4080 data_klm
->key
= cpu_to_be32(data_key
);
4081 data_klm
->va
= cpu_to_be64(data_va
);
4082 wqe_size
= ALIGN(sizeof(*data_klm
), 64);
4085 * Source domain contains signature information
4086 * So need construct a strided block format:
4087 * ---------------------------
4088 * | stride_block_ctrl |
4089 * ---------------------------
4091 * ---------------------------
4093 * ---------------------------
4095 * ---------------------------
4097 struct mlx5_stride_block_ctrl_seg
*sblock_ctrl
;
4098 struct mlx5_stride_block_entry
*data_sentry
;
4099 struct mlx5_stride_block_entry
*prot_sentry
;
4100 u32 prot_key
= wr
->prot
->lkey
;
4101 u64 prot_va
= wr
->prot
->addr
;
4102 u16 block_size
= sig_attrs
->mem
.sig
.dif
.pi_interval
;
4106 data_sentry
= (void *)sblock_ctrl
+ sizeof(*sblock_ctrl
);
4107 prot_sentry
= (void *)data_sentry
+ sizeof(*data_sentry
);
4109 prot_size
= prot_field_size(sig_attrs
->mem
.sig_type
);
4111 pr_err("Bad block size given: %u\n", block_size
);
4114 sblock_ctrl
->bcount_per_cycle
= cpu_to_be32(block_size
+
4116 sblock_ctrl
->op
= cpu_to_be32(MLX5_STRIDE_BLOCK_OP
);
4117 sblock_ctrl
->repeat_count
= cpu_to_be32(data_len
/ block_size
);
4118 sblock_ctrl
->num_entries
= cpu_to_be16(2);
4120 data_sentry
->bcount
= cpu_to_be16(block_size
);
4121 data_sentry
->key
= cpu_to_be32(data_key
);
4122 data_sentry
->va
= cpu_to_be64(data_va
);
4123 data_sentry
->stride
= cpu_to_be16(block_size
);
4125 prot_sentry
->bcount
= cpu_to_be16(prot_size
);
4126 prot_sentry
->key
= cpu_to_be32(prot_key
);
4127 prot_sentry
->va
= cpu_to_be64(prot_va
);
4128 prot_sentry
->stride
= cpu_to_be16(prot_size
);
4130 wqe_size
= ALIGN(sizeof(*sblock_ctrl
) + sizeof(*data_sentry
) +
4131 sizeof(*prot_sentry
), 64);
4135 *size
+= wqe_size
/ 16;
4136 if (unlikely((*seg
== qp
->sq
.qend
)))
4137 *seg
= mlx5_get_send_wqe(qp
, 0);
4140 ret
= mlx5_set_bsf(sig_mr
, sig_attrs
, bsf
, data_len
);
4144 *seg
+= sizeof(*bsf
);
4145 *size
+= sizeof(*bsf
) / 16;
4146 if (unlikely((*seg
== qp
->sq
.qend
)))
4147 *seg
= mlx5_get_send_wqe(qp
, 0);
4152 static void set_sig_mkey_segment(struct mlx5_mkey_seg
*seg
,
4153 const struct ib_sig_handover_wr
*wr
, u32 size
,
4154 u32 length
, u32 pdn
)
4156 struct ib_mr
*sig_mr
= wr
->sig_mr
;
4157 u32 sig_key
= sig_mr
->rkey
;
4158 u8 sigerr
= to_mmr(sig_mr
)->sig
->sigerr_count
& 1;
4160 memset(seg
, 0, sizeof(*seg
));
4162 seg
->flags
= get_umr_flags(wr
->access_flags
) |
4163 MLX5_MKC_ACCESS_MODE_KLMS
;
4164 seg
->qpn_mkey7_0
= cpu_to_be32((sig_key
& 0xff) | 0xffffff00);
4165 seg
->flags_pd
= cpu_to_be32(MLX5_MKEY_REMOTE_INVAL
| sigerr
<< 26 |
4166 MLX5_MKEY_BSF_EN
| pdn
);
4167 seg
->len
= cpu_to_be64(length
);
4168 seg
->xlt_oct_size
= cpu_to_be32(get_xlt_octo(size
));
4169 seg
->bsfs_octo_size
= cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE
);
4172 static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg
*umr
,
4175 memset(umr
, 0, sizeof(*umr
));
4177 umr
->flags
= MLX5_FLAGS_INLINE
| MLX5_FLAGS_CHECK_FREE
;
4178 umr
->xlt_octowords
= cpu_to_be16(get_xlt_octo(size
));
4179 umr
->bsf_octowords
= cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE
);
4180 umr
->mkey_mask
= sig_mkey_mask();
4184 static int set_sig_umr_wr(const struct ib_send_wr
*send_wr
,
4185 struct mlx5_ib_qp
*qp
, void **seg
, int *size
)
4187 const struct ib_sig_handover_wr
*wr
= sig_handover_wr(send_wr
);
4188 struct mlx5_ib_mr
*sig_mr
= to_mmr(wr
->sig_mr
);
4189 u32 pdn
= get_pd(qp
)->pdn
;
4191 int region_len
, ret
;
4193 if (unlikely(wr
->wr
.num_sge
!= 1) ||
4194 unlikely(wr
->access_flags
& IB_ACCESS_REMOTE_ATOMIC
) ||
4195 unlikely(!sig_mr
->sig
) || unlikely(!qp
->signature_en
) ||
4196 unlikely(!sig_mr
->sig
->sig_status_checked
))
4199 /* length of the protected region, data + protection */
4200 region_len
= wr
->wr
.sg_list
->length
;
4202 (wr
->prot
->lkey
!= wr
->wr
.sg_list
->lkey
||
4203 wr
->prot
->addr
!= wr
->wr
.sg_list
->addr
||
4204 wr
->prot
->length
!= wr
->wr
.sg_list
->length
))
4205 region_len
+= wr
->prot
->length
;
4208 * KLM octoword size - if protection was provided
4209 * then we use strided block format (3 octowords),
4210 * else we use single KLM (1 octoword)
4212 xlt_size
= wr
->prot
? 0x30 : sizeof(struct mlx5_klm
);
4214 set_sig_umr_segment(*seg
, xlt_size
);
4215 *seg
+= sizeof(struct mlx5_wqe_umr_ctrl_seg
);
4216 *size
+= sizeof(struct mlx5_wqe_umr_ctrl_seg
) / 16;
4217 if (unlikely((*seg
== qp
->sq
.qend
)))
4218 *seg
= mlx5_get_send_wqe(qp
, 0);
4220 set_sig_mkey_segment(*seg
, wr
, xlt_size
, region_len
, pdn
);
4221 *seg
+= sizeof(struct mlx5_mkey_seg
);
4222 *size
+= sizeof(struct mlx5_mkey_seg
) / 16;
4223 if (unlikely((*seg
== qp
->sq
.qend
)))
4224 *seg
= mlx5_get_send_wqe(qp
, 0);
4226 ret
= set_sig_data_segment(wr
, qp
, seg
, size
);
4230 sig_mr
->sig
->sig_status_checked
= false;
4234 static int set_psv_wr(struct ib_sig_domain
*domain
,
4235 u32 psv_idx
, void **seg
, int *size
)
4237 struct mlx5_seg_set_psv
*psv_seg
= *seg
;
4239 memset(psv_seg
, 0, sizeof(*psv_seg
));
4240 psv_seg
->psv_num
= cpu_to_be32(psv_idx
);
4241 switch (domain
->sig_type
) {
4242 case IB_SIG_TYPE_NONE
:
4244 case IB_SIG_TYPE_T10_DIF
:
4245 psv_seg
->transient_sig
= cpu_to_be32(domain
->sig
.dif
.bg
<< 16 |
4246 domain
->sig
.dif
.app_tag
);
4247 psv_seg
->ref_tag
= cpu_to_be32(domain
->sig
.dif
.ref_tag
);
4250 pr_err("Bad signature type (%d) is given.\n",
4255 *seg
+= sizeof(*psv_seg
);
4256 *size
+= sizeof(*psv_seg
) / 16;
static int set_reg_wr(struct mlx5_ib_qp *qp,
                      const struct ib_reg_wr *wr,
                      void **seg, int *size)
{
    struct mlx5_ib_mr *mr = to_mmr(wr->mr);
    struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
    int mr_list_size = mr->ndescs * mr->desc_size;
    bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD;

    if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
        mlx5_ib_warn(to_mdev(qp->ibqp.device),
                     "Invalid IB_SEND_INLINE send flag\n");
        return -EINVAL;
    }

    set_reg_umr_seg(*seg, mr, umr_inline);
    *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
    *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
    if (unlikely((*seg == qp->sq.qend)))
        *seg = mlx5_get_send_wqe(qp, 0);

    set_reg_mkey_seg(*seg, mr, wr->key, wr->access);
    *seg += sizeof(struct mlx5_mkey_seg);
    *size += sizeof(struct mlx5_mkey_seg) / 16;
    if (unlikely((*seg == qp->sq.qend)))
        *seg = mlx5_get_send_wqe(qp, 0);

    if (umr_inline) {
        set_reg_umr_inline_seg(*seg, qp, mr, mr_list_size);
        *size += get_xlt_octo(mr_list_size);
    } else {
        set_reg_data_seg(*seg, mr, pd);
        *seg += sizeof(struct mlx5_wqe_data_seg);
        *size += (sizeof(struct mlx5_wqe_data_seg) / 16);
    }
    return 0;
}

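/* A local-invalidate WQE is a UMR control segment followed by an mkey segment. */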
static void set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size)
{
    set_linv_umr_seg(*seg);
    *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
    *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
    if (unlikely((*seg == qp->sq.qend)))
        *seg = mlx5_get_send_wqe(qp, 0);
    set_linv_mkey_seg(*seg);
    *seg += sizeof(struct mlx5_mkey_seg);
    *size += sizeof(struct mlx5_mkey_seg) / 16;
    if (unlikely((*seg == qp->sq.qend)))
        *seg = mlx5_get_send_wqe(qp, 0);
}

static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
{
    __be32 *p = NULL;
    int tidx = idx;
    int i, j;

    pr_debug("dump wqe at %p\n", mlx5_get_send_wqe(qp, tidx));
    for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
        if ((i & 0xf) == 0) {
            void *buf = mlx5_get_send_wqe(qp, tidx);
            tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1);
            p = buf;
            j = 0;
        }
        pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
                 be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
                 be32_to_cpu(p[j + 3]));
    }
}

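/*
 * Reserve the next slot on the send queue and initialize its control
 * segment; *seg and *size are advanced past the control segment so the
 * caller can append the remaining WQE segments.
 */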
static int __begin_wqe(struct mlx5_ib_qp *qp, void **seg,
                       struct mlx5_wqe_ctrl_seg **ctrl,
                       const struct ib_send_wr *wr, unsigned *idx,
                       int *size, int nreq, bool send_signaled, bool solicited)
{
    if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))
        return -ENOMEM;

    *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
    *seg = mlx5_get_send_wqe(qp, *idx);
    *ctrl = *seg;
    *(uint32_t *)(*seg + 8) = 0;
    (*ctrl)->imm = send_ieth(wr);
    (*ctrl)->fm_ce_se = qp->sq_signal_bits |
        (send_signaled ? MLX5_WQE_CTRL_CQ_UPDATE : 0) |
        (solicited ? MLX5_WQE_CTRL_SOLICITED : 0);

    *seg += sizeof(**ctrl);
    *size = sizeof(**ctrl) / 16;

    return 0;
}

static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
                     struct mlx5_wqe_ctrl_seg **ctrl,
                     const struct ib_send_wr *wr, unsigned *idx,
                     int *size, int nreq)
{
    return __begin_wqe(qp, seg, ctrl, wr, idx, size, nreq,
                       wr->send_flags & IB_SEND_SIGNALED,
                       wr->send_flags & IB_SEND_SOLICITED);
}

static void finish_wqe(struct mlx5_ib_qp *qp,
                       struct mlx5_wqe_ctrl_seg *ctrl,
                       u8 size, unsigned idx, u64 wr_id,
                       int nreq, u8 fence, u32 mlx5_opcode)
{
    u8 opmod = 0;

    ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
                                         mlx5_opcode | ((u32)opmod << 24));
    ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8));
    ctrl->fm_ce_se |= fence;
    if (unlikely(qp->wq_sig))
        ctrl->signature = wq_sig(ctrl);

    qp->sq.wrid[idx] = wr_id;
    qp->sq.w_list[idx].opcode = mlx5_opcode;
    qp->sq.wqe_head[idx] = qp->sq.head + nreq;
    qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
    qp->sq.w_list[idx].next = qp->sq.cur_post;
}

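/*
 * Build and post a chain of send WQEs under the SQ lock, then update the
 * doorbell record and ring the doorbell once for the whole chain.
 */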
static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
                              const struct ib_send_wr **bad_wr, bool drain)
{
    struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */
    struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
    struct mlx5_core_dev *mdev = dev->mdev;
    struct mlx5_ib_qp *qp;
    struct mlx5_ib_mr *mr;
    struct mlx5_wqe_data_seg *dpseg;
    struct mlx5_wqe_xrc_seg *xrc;
    struct mlx5_bf *bf;
    int uninitialized_var(size);
    void *qend;
    unsigned long flags;
    unsigned idx;
    int err = 0;
    int num_sge;
    void *seg;
    int nreq;
    int i;
    u8 next_fence = 0;
    u8 fence;

    if (unlikely(ibqp->qp_type == IB_QPT_GSI))
        return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr);

    qp = to_mqp(ibqp);
    bf = &qp->bf;
    qend = qp->sq.qend;

    spin_lock_irqsave(&qp->sq.lock, flags);

    if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR && !drain) {
        err = -EIO;
        *bad_wr = wr;
        nreq = 0;
        goto out;
    }

    for (nreq = 0; wr; nreq++, wr = wr->next) {
        if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
            mlx5_ib_warn(dev, "\n");
            err = -EINVAL;
            *bad_wr = wr;
            goto out;
        }

        num_sge = wr->num_sge;
        if (unlikely(num_sge > qp->sq.max_gs)) {
            mlx5_ib_warn(dev, "\n");
            err = -EINVAL;
            *bad_wr = wr;
            goto out;
        }

        err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq);
        if (err) {
            mlx5_ib_warn(dev, "\n");
            err = -ENOMEM;
            *bad_wr = wr;
            goto out;
        }

        if (wr->opcode == IB_WR_LOCAL_INV ||
            wr->opcode == IB_WR_REG_MR) {
            fence = dev->umr_fence;
            next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
        } else if (wr->send_flags & IB_SEND_FENCE) {
            if (qp->next_fence)
                fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
            else
                fence = MLX5_FENCE_MODE_FENCE;
        } else {
            fence = qp->next_fence;
        }
        switch (ibqp->qp_type) {
        case IB_QPT_XRC_INI:
            xrc = seg;
            seg += sizeof(*xrc);
            size += sizeof(*xrc) / 16;
            /* fall through */
        case IB_QPT_RC:
            switch (wr->opcode) {
            case IB_WR_RDMA_READ:
            case IB_WR_RDMA_WRITE:
            case IB_WR_RDMA_WRITE_WITH_IMM:
                set_raddr_seg(seg, rdma_wr(wr)->remote_addr,
                              rdma_wr(wr)->rkey);
                seg += sizeof(struct mlx5_wqe_raddr_seg);
                size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
                break;

            case IB_WR_ATOMIC_CMP_AND_SWP:
            case IB_WR_ATOMIC_FETCH_AND_ADD:
            case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
                mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
                err = -ENOSYS;
                *bad_wr = wr;
                goto out;

            case IB_WR_LOCAL_INV:
                qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
                ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
                set_linv_wr(qp, &seg, &size);
                num_sge = 0;
                break;

            case IB_WR_REG_MR:
                qp->sq.wr_data[idx] = IB_WR_REG_MR;
                ctrl->imm = cpu_to_be32(reg_wr(wr)->key);
                err = set_reg_wr(qp, reg_wr(wr), &seg, &size);
                if (err) {
                    *bad_wr = wr;
                    goto out;
                }
                num_sge = 0;
                break;

            case IB_WR_REG_SIG_MR:
                qp->sq.wr_data[idx] = IB_WR_REG_SIG_MR;
                mr = to_mmr(sig_handover_wr(wr)->sig_mr);

                ctrl->imm = cpu_to_be32(mr->ibmr.rkey);
                err = set_sig_umr_wr(wr, qp, &seg, &size);
                if (err) {
                    mlx5_ib_warn(dev, "\n");
                    *bad_wr = wr;
                    goto out;
                }

                finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
                           fence, MLX5_OPCODE_UMR);
                /*
                 * SET_PSV WQEs are not signaled and solicited
                 * on error
                 */
                err = __begin_wqe(qp, &seg, &ctrl, wr, &idx,
                                  &size, nreq, false, true);
                if (err) {
                    mlx5_ib_warn(dev, "\n");
                    err = -ENOMEM;
                    *bad_wr = wr;
                    goto out;
                }

                err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->mem,
                                 mr->sig->psv_memory.psv_idx, &seg,
                                 &size);
                if (err) {
                    mlx5_ib_warn(dev, "\n");
                    *bad_wr = wr;
                    goto out;
                }

                finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
                           fence, MLX5_OPCODE_SET_PSV);
                err = __begin_wqe(qp, &seg, &ctrl, wr, &idx,
                                  &size, nreq, false, true);
                if (err) {
                    mlx5_ib_warn(dev, "\n");
                    err = -ENOMEM;
                    *bad_wr = wr;
                    goto out;
                }

                err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->wire,
                                 mr->sig->psv_wire.psv_idx, &seg,
                                 &size);
                if (err) {
                    mlx5_ib_warn(dev, "\n");
                    *bad_wr = wr;
                    goto out;
                }

                finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
                           fence, MLX5_OPCODE_SET_PSV);
                qp->next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
                num_sge = 0;
                goto skip_psv;

            default:
                break;
            }
            break;
        case IB_QPT_UC:
            switch (wr->opcode) {
            case IB_WR_RDMA_WRITE:
            case IB_WR_RDMA_WRITE_WITH_IMM:
                set_raddr_seg(seg, rdma_wr(wr)->remote_addr,
                              rdma_wr(wr)->rkey);
                seg += sizeof(struct mlx5_wqe_raddr_seg);
                size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
                break;

            default:
                break;
            }
            break;

        case IB_QPT_SMI:
            if (unlikely(!mdev->port_caps[qp->port - 1].has_smi)) {
                mlx5_ib_warn(dev, "Send SMP MADs is not allowed\n");
                err = -EPERM;
                *bad_wr = wr;
                goto out;
            }
            /* fall through */
        case MLX5_IB_QPT_HW_GSI:
            set_datagram_seg(seg, wr);
            seg += sizeof(struct mlx5_wqe_datagram_seg);
            size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
            if (unlikely((seg == qend)))
                seg = mlx5_get_send_wqe(qp, 0);
            break;
        case IB_QPT_UD:
            set_datagram_seg(seg, wr);
            seg += sizeof(struct mlx5_wqe_datagram_seg);
            size += sizeof(struct mlx5_wqe_datagram_seg) / 16;

            if (unlikely((seg == qend)))
                seg = mlx5_get_send_wqe(qp, 0);

            /* handle qp that supports ud offload */
            if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO) {
                struct mlx5_wqe_eth_pad *pad;

                pad = seg;
                memset(pad, 0, sizeof(struct mlx5_wqe_eth_pad));
                seg += sizeof(struct mlx5_wqe_eth_pad);
                size += sizeof(struct mlx5_wqe_eth_pad) / 16;

                seg = set_eth_seg(seg, wr, qend, qp, &size);

                if (unlikely((seg == qend)))
                    seg = mlx5_get_send_wqe(qp, 0);
            }
            break;
        case MLX5_IB_QPT_REG_UMR:
            if (wr->opcode != MLX5_IB_WR_UMR) {
                err = -EINVAL;
                mlx5_ib_warn(dev, "bad opcode\n");
                goto out;
            }
            qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
            ctrl->imm = cpu_to_be32(umr_wr(wr)->mkey);
            err = set_reg_umr_segment(dev, seg, wr, !!(MLX5_CAP_GEN(mdev, atomic)));
            if (unlikely(err))
                goto out;
            seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
            size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
            if (unlikely((seg == qend)))
                seg = mlx5_get_send_wqe(qp, 0);
            set_reg_mkey_segment(seg, wr);
            seg += sizeof(struct mlx5_mkey_seg);
            size += sizeof(struct mlx5_mkey_seg) / 16;
            if (unlikely((seg == qend)))
                seg = mlx5_get_send_wqe(qp, 0);
            break;

        default:
            break;
        }
        if (wr->send_flags & IB_SEND_INLINE && num_sge) {
            int uninitialized_var(sz);

            err = set_data_inl_seg(qp, wr, seg, &sz);
            if (unlikely(err)) {
                mlx5_ib_warn(dev, "\n");
                *bad_wr = wr;
                goto out;
            }
            size += sz;
        } else {
            dpseg = seg;
            for (i = 0; i < num_sge; i++) {
                if (unlikely(dpseg == qend)) {
                    seg = mlx5_get_send_wqe(qp, 0);
                    dpseg = seg;
                }
                if (likely(wr->sg_list[i].length)) {
                    set_data_ptr_seg(dpseg, wr->sg_list + i);
                    size += sizeof(struct mlx5_wqe_data_seg) / 16;
                    dpseg++;
                }
            }
        }

        qp->next_fence = next_fence;
        finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, fence,
                   mlx5_ib_opcode[wr->opcode]);
skip_psv:
        if (0)
            dump_wqe(qp, idx, size);
    }

out:
    if (likely(nreq)) {
        qp->sq.head += nreq;

        /* Make sure that descriptors are written before
         * updating doorbell record and ringing the doorbell
         */
        wmb();

        qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);

        /* Make sure doorbell record is visible to the HCA before
         * we hit doorbell */
        wmb();

        /* currently we support only regular doorbells */
        mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset, NULL);
        /* Make sure doorbells don't leak out of SQ spinlock
         * and reach the HCA out of order.
         */
        mmiowb();
        bf->offset ^= bf->buf_size;
    }

    spin_unlock_irqrestore(&qp->sq.lock, flags);

    return err;
}

int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
                      const struct ib_send_wr **bad_wr)
{
    return _mlx5_ib_post_send(ibqp, wr, bad_wr, false);
}

static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)
{
    sig->signature = calc_sig(sig, size);
}

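/*
 * Post receive WQEs: each one is an array of scatter entries, terminated
 * with an invalid lkey when fewer than max_gs entries are used.
 */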
static int _mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
                              const struct ib_recv_wr **bad_wr, bool drain)
{
    struct mlx5_ib_qp *qp = to_mqp(ibqp);
    struct mlx5_wqe_data_seg *scat;
    struct mlx5_rwqe_sig *sig;
    struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
    struct mlx5_core_dev *mdev = dev->mdev;
    unsigned long flags;
    int err = 0;
    int nreq;
    int ind;
    int i;

    if (unlikely(ibqp->qp_type == IB_QPT_GSI))
        return mlx5_ib_gsi_post_recv(ibqp, wr, bad_wr);

    spin_lock_irqsave(&qp->rq.lock, flags);

    if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR && !drain) {
        err = -EIO;
        *bad_wr = wr;
        nreq = 0;
        goto out;
    }

    ind = qp->rq.head & (qp->rq.wqe_cnt - 1);

    for (nreq = 0; wr; nreq++, wr = wr->next) {
        if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
            err = -ENOMEM;
            *bad_wr = wr;
            goto out;
        }

        if (unlikely(wr->num_sge > qp->rq.max_gs)) {
            err = -EINVAL;
            *bad_wr = wr;
            goto out;
        }

        scat = get_recv_wqe(qp, ind);

        for (i = 0; i < wr->num_sge; i++)
            set_data_ptr_seg(scat + i, wr->sg_list + i);

        if (i < qp->rq.max_gs) {
            scat[i].byte_count = 0;
            scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
            scat[i].addr = 0;
        }

        if (qp->wq_sig) {
            sig = (struct mlx5_rwqe_sig *)scat;
            set_sig_seg(sig, (qp->rq.max_gs + 1) << 2);
        }

        qp->rq.wrid[ind] = wr->wr_id;

        ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
    }

out:
    if (likely(nreq)) {
        qp->rq.head += nreq;

        /* Make sure that descriptors are written before
         * doorbell record.
         */
        wmb();

        *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
    }

    spin_unlock_irqrestore(&qp->rq.lock, flags);

    return err;
}

int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
                      const struct ib_recv_wr **bad_wr)
{
    return _mlx5_ib_post_recv(ibqp, wr, bad_wr, false);
}

static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
{
    switch (mlx5_state) {
    case MLX5_QP_STATE_RST:         return IB_QPS_RESET;
    case MLX5_QP_STATE_INIT:        return IB_QPS_INIT;
    case MLX5_QP_STATE_RTR:         return IB_QPS_RTR;
    case MLX5_QP_STATE_RTS:         return IB_QPS_RTS;
    case MLX5_QP_STATE_SQ_DRAINING:
    case MLX5_QP_STATE_SQD:         return IB_QPS_SQD;
    case MLX5_QP_STATE_SQER:        return IB_QPS_SQE;
    case MLX5_QP_STATE_ERR:         return IB_QPS_ERR;
    default:                        return -1;
    }
}

static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state)
{
    switch (mlx5_mig_state) {
    case MLX5_QP_PM_ARMED:          return IB_MIG_ARMED;
    case MLX5_QP_PM_REARM:          return IB_MIG_REARM;
    case MLX5_QP_PM_MIGRATED:       return IB_MIG_MIGRATED;
    default:                        return -1;
    }
}

static int to_ib_qp_access_flags(int mlx5_flags)
{
    int ib_flags = 0;

    if (mlx5_flags & MLX5_QP_BIT_RRE)
        ib_flags |= IB_ACCESS_REMOTE_READ;
    if (mlx5_flags & MLX5_QP_BIT_RWE)
        ib_flags |= IB_ACCESS_REMOTE_WRITE;
    if (mlx5_flags & MLX5_QP_BIT_RAE)
        ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

    return ib_flags;
}

static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
                            struct rdma_ah_attr *ah_attr,
                            struct mlx5_qp_path *path)
{
    memset(ah_attr, 0, sizeof(*ah_attr));

    if (!path->port || path->port > ibdev->num_ports)
        return;

    ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, path->port);

    rdma_ah_set_port_num(ah_attr, path->port);
    rdma_ah_set_sl(ah_attr, path->dci_cfi_prio_sl & 0xf);

    rdma_ah_set_dlid(ah_attr, be16_to_cpu(path->rlid));
    rdma_ah_set_path_bits(ah_attr, path->grh_mlid & 0x7f);
    rdma_ah_set_static_rate(ah_attr,
                            path->static_rate ? path->static_rate - 5 : 0);
    if (path->grh_mlid & (1 << 7)) {
        u32 tc_fl = be32_to_cpu(path->tclass_flowlabel);

        rdma_ah_set_grh(ah_attr, NULL,
                        tc_fl & 0xfffff,
                        path->mgid_index,
                        path->hop_limit,
                        (tc_fl >> 20) & 0xff);
        rdma_ah_set_dgid_raw(ah_attr, path->rgid);
    }
}

static int query_raw_packet_qp_sq_state(struct mlx5_ib_dev *dev,
                                        struct mlx5_ib_sq *sq,
                                        u8 *sq_state)
{
    int err;

    err = mlx5_core_query_sq_state(dev->mdev, sq->base.mqp.qpn, sq_state);
    if (err)
        return err;
    sq->state = *sq_state;

    return 0;
}

static int query_raw_packet_qp_rq_state(struct mlx5_ib_dev *dev,
                                        struct mlx5_ib_rq *rq,
                                        u8 *rq_state)
{
    void *out;
    void *rqc;
    int inlen;
    int err;

    inlen = MLX5_ST_SZ_BYTES(query_rq_out);
    out = kvzalloc(inlen, GFP_KERNEL);
    if (!out)
        return -ENOMEM;

    err = mlx5_core_query_rq(dev->mdev, rq->base.mqp.qpn, out);
    if (err)
        goto out;

    rqc = MLX5_ADDR_OF(query_rq_out, out, rq_context);
    *rq_state = MLX5_GET(rqc, rqc, state);
    rq->state = *rq_state;

out:
    kvfree(out);
    return err;
}

static int sqrq_state_to_qp_state(u8 sq_state, u8 rq_state,
                                  struct mlx5_ib_qp *qp, u8 *qp_state)
{
    static const u8 sqrq_trans[MLX5_RQ_NUM_STATE][MLX5_SQ_NUM_STATE] = {
        [MLX5_RQC_STATE_RST] = {
            [MLX5_SQC_STATE_RST]    = IB_QPS_RESET,
            [MLX5_SQC_STATE_RDY]    = MLX5_QP_STATE_BAD,
            [MLX5_SQC_STATE_ERR]    = MLX5_QP_STATE_BAD,
            [MLX5_SQ_STATE_NA]      = IB_QPS_RESET,
        },
        [MLX5_RQC_STATE_RDY] = {
            [MLX5_SQC_STATE_RST]    = MLX5_QP_STATE_BAD,
            [MLX5_SQC_STATE_RDY]    = MLX5_QP_STATE,
            [MLX5_SQC_STATE_ERR]    = IB_QPS_SQE,
            [MLX5_SQ_STATE_NA]      = MLX5_QP_STATE,
        },
        [MLX5_RQC_STATE_ERR] = {
            [MLX5_SQC_STATE_RST]    = MLX5_QP_STATE_BAD,
            [MLX5_SQC_STATE_RDY]    = MLX5_QP_STATE_BAD,
            [MLX5_SQC_STATE_ERR]    = IB_QPS_ERR,
            [MLX5_SQ_STATE_NA]      = IB_QPS_ERR,
        },
        [MLX5_RQ_STATE_NA] = {
            [MLX5_SQC_STATE_RST]    = IB_QPS_RESET,
            [MLX5_SQC_STATE_RDY]    = MLX5_QP_STATE,
            [MLX5_SQC_STATE_ERR]    = MLX5_QP_STATE,
            [MLX5_SQ_STATE_NA]      = MLX5_QP_STATE_BAD,
        },
    };

    *qp_state = sqrq_trans[rq_state][sq_state];

    if (*qp_state == MLX5_QP_STATE_BAD) {
        WARN(1, "Buggy Raw Packet QP state, SQ 0x%x state: 0x%x, RQ 0x%x state: 0x%x",
             qp->raw_packet_qp.sq.base.mqp.qpn, sq_state,
             qp->raw_packet_qp.rq.base.mqp.qpn, rq_state);
        return -EINVAL;
    }

    if (*qp_state == MLX5_QP_STATE)
        *qp_state = qp->state;

    return 0;
}

static int query_raw_packet_qp_state(struct mlx5_ib_dev *dev,
                                     struct mlx5_ib_qp *qp,
                                     u8 *raw_packet_qp_state)
{
    struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
    struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
    struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
    int err;
    u8 sq_state = MLX5_SQ_STATE_NA;
    u8 rq_state = MLX5_RQ_STATE_NA;

    if (qp->sq.wqe_cnt) {
        err = query_raw_packet_qp_sq_state(dev, sq, &sq_state);
        if (err)
            return err;
    }

    if (qp->rq.wqe_cnt) {
        err = query_raw_packet_qp_rq_state(dev, rq, &rq_state);
        if (err)
            return err;
    }

    return sqrq_state_to_qp_state(sq_state, rq_state, qp,
                                  raw_packet_qp_state);
}

static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
                         struct ib_qp_attr *qp_attr)
{
    int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
    struct mlx5_qp_context *context;
    int mlx5_state;
    u32 *outb;
    int err = 0;

    outb = kzalloc(outlen, GFP_KERNEL);
    if (!outb)
        return -ENOMEM;

    err = mlx5_core_qp_query(dev->mdev, &qp->trans_qp.base.mqp, outb,
                             outlen);
    if (err)
        goto out;

    /* FIXME: use MLX5_GET rather than mlx5_qp_context manual struct */
    context = (struct mlx5_qp_context *)MLX5_ADDR_OF(query_qp_out, outb, qpc);

    mlx5_state = be32_to_cpu(context->flags) >> 28;

    qp->state                = to_ib_qp_state(mlx5_state);
    qp_attr->path_mtu        = context->mtu_msgmax >> 5;
    qp_attr->path_mig_state  =
        to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
    qp_attr->qkey            = be32_to_cpu(context->qkey);
    qp_attr->rq_psn          = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
    qp_attr->sq_psn          = be32_to_cpu(context->next_send_psn) & 0xffffff;
    qp_attr->dest_qp_num     = be32_to_cpu(context->log_pg_sz_remote_qpn) & 0xffffff;
    qp_attr->qp_access_flags =
        to_ib_qp_access_flags(be32_to_cpu(context->params2));

    if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
        to_rdma_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
        to_rdma_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
        qp_attr->alt_pkey_index =
            be16_to_cpu(context->alt_path.pkey_index);
        qp_attr->alt_port_num =
            rdma_ah_get_port_num(&qp_attr->alt_ah_attr);
    }

    qp_attr->pkey_index = be16_to_cpu(context->pri_path.pkey_index);
    qp_attr->port_num = context->pri_path.port;

    /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
    qp_attr->sq_draining = mlx5_state == MLX5_QP_STATE_SQ_DRAINING;

    qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);

    qp_attr->max_dest_rd_atomic =
        1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
    qp_attr->min_rnr_timer =
        (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
    qp_attr->timeout       = context->pri_path.ackto_lt >> 3;
    qp_attr->retry_cnt     = (be32_to_cpu(context->params1) >> 16) & 0x7;
    qp_attr->rnr_retry     = (be32_to_cpu(context->params1) >> 13) & 0x7;
    qp_attr->alt_timeout   = context->alt_path.ackto_lt >> 3;

out:
    kfree(outb);
    return err;
}

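/* Query path for DCT QPs: attributes are read from the DCT context in firmware. */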
static int mlx5_ib_dct_query_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *mqp,
                                struct ib_qp_attr *qp_attr, int qp_attr_mask,
                                struct ib_qp_init_attr *qp_init_attr)
{
    struct mlx5_core_dct *dct = &mqp->dct.mdct;
    u32 *out;
    u32 access_flags = 0;
    int outlen = MLX5_ST_SZ_BYTES(query_dct_out);
    void *dctc;
    int err;
    int supported_mask = IB_QP_STATE |
                         IB_QP_ACCESS_FLAGS |
                         IB_QP_PORT |
                         IB_QP_MIN_RNR_TIMER |
                         IB_QP_AV |
                         IB_QP_PATH_MTU |
                         IB_QP_PKEY_INDEX;

    if (qp_attr_mask & ~supported_mask)
        return -EINVAL;
    if (mqp->state != IB_QPS_RTR)
        return -EINVAL;

    out = kzalloc(outlen, GFP_KERNEL);
    if (!out)
        return -ENOMEM;

    err = mlx5_core_dct_query(dev->mdev, dct, out, outlen);
    if (err)
        goto out;

    dctc = MLX5_ADDR_OF(query_dct_out, out, dct_context_entry);

    if (qp_attr_mask & IB_QP_STATE)
        qp_attr->qp_state = IB_QPS_RTR;

    if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
        if (MLX5_GET(dctc, dctc, rre))
            access_flags |= IB_ACCESS_REMOTE_READ;
        if (MLX5_GET(dctc, dctc, rwe))
            access_flags |= IB_ACCESS_REMOTE_WRITE;
        if (MLX5_GET(dctc, dctc, rae))
            access_flags |= IB_ACCESS_REMOTE_ATOMIC;
        qp_attr->qp_access_flags = access_flags;
    }

    if (qp_attr_mask & IB_QP_PORT)
        qp_attr->port_num = MLX5_GET(dctc, dctc, port);
    if (qp_attr_mask & IB_QP_MIN_RNR_TIMER)
        qp_attr->min_rnr_timer = MLX5_GET(dctc, dctc, min_rnr_nak);
    if (qp_attr_mask & IB_QP_AV) {
        qp_attr->ah_attr.grh.traffic_class = MLX5_GET(dctc, dctc, tclass);
        qp_attr->ah_attr.grh.flow_label = MLX5_GET(dctc, dctc, flow_label);
        qp_attr->ah_attr.grh.sgid_index = MLX5_GET(dctc, dctc, my_addr_index);
        qp_attr->ah_attr.grh.hop_limit = MLX5_GET(dctc, dctc, hop_limit);
    }
    if (qp_attr_mask & IB_QP_PATH_MTU)
        qp_attr->path_mtu = MLX5_GET(dctc, dctc, mtu);
    if (qp_attr_mask & IB_QP_PKEY_INDEX)
        qp_attr->pkey_index = MLX5_GET(dctc, dctc, pkey_index);

out:
    kfree(out);
    return err;
}

int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
                     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
    struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
    struct mlx5_ib_qp *qp = to_mqp(ibqp);
    int err = 0;
    u8 raw_packet_qp_state;

    if (ibqp->rwq_ind_tbl)
        return -ENOSYS;

    if (unlikely(ibqp->qp_type == IB_QPT_GSI))
        return mlx5_ib_gsi_query_qp(ibqp, qp_attr, qp_attr_mask,
                                    qp_init_attr);

    /* Not all of output fields are applicable, make sure to zero them */
    memset(qp_init_attr, 0, sizeof(*qp_init_attr));
    memset(qp_attr, 0, sizeof(*qp_attr));

    if (unlikely(qp->qp_sub_type == MLX5_IB_QPT_DCT))
        return mlx5_ib_dct_query_qp(dev, qp, qp_attr,
                                    qp_attr_mask, qp_init_attr);

    mutex_lock(&qp->mutex);

    if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
        qp->flags & MLX5_IB_QP_UNDERLAY) {
        err = query_raw_packet_qp_state(dev, qp, &raw_packet_qp_state);
        if (err)
            goto out;
        qp->state = raw_packet_qp_state;
        qp_attr->port_num = 1;
    } else {
        err = query_qp_attr(dev, qp, qp_attr);
        if (err)
            goto out;
    }

    qp_attr->qp_state         = qp->state;
    qp_attr->cur_qp_state     = qp_attr->qp_state;
    qp_attr->cap.max_recv_wr  = qp->rq.wqe_cnt;
    qp_attr->cap.max_recv_sge = qp->rq.max_gs;

    if (!ibqp->uobject) {
        qp_attr->cap.max_send_wr  = qp->sq.max_post;
        qp_attr->cap.max_send_sge = qp->sq.max_gs;
        qp_init_attr->qp_context = ibqp->qp_context;
    } else {
        qp_attr->cap.max_send_wr  = 0;
        qp_attr->cap.max_send_sge = 0;
    }

    qp_init_attr->qp_type = ibqp->qp_type;
    qp_init_attr->recv_cq = ibqp->recv_cq;
    qp_init_attr->send_cq = ibqp->send_cq;
    qp_init_attr->srq = ibqp->srq;
    qp_attr->cap.max_inline_data = qp->max_inline_data;

    qp_init_attr->cap = qp_attr->cap;

    qp_init_attr->create_flags = 0;
    if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
        qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;

    if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
        qp_init_attr->create_flags |= IB_QP_CREATE_CROSS_CHANNEL;
    if (qp->flags & MLX5_IB_QP_MANAGED_SEND)
        qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_SEND;
    if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
        qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_RECV;
    if (qp->flags & MLX5_IB_QP_SQPN_QP1)
        qp_init_attr->create_flags |= mlx5_ib_create_qp_sqpn_qp1();

    qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
        IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;

out:
    mutex_unlock(&qp->mutex);
    return err;
}

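/* XRC domain allocation and deallocation are thin wrappers around the core XRCD commands. */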
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
                                   struct ib_ucontext *context,
                                   struct ib_udata *udata)
{
    struct mlx5_ib_dev *dev = to_mdev(ibdev);
    struct mlx5_ib_xrcd *xrcd;
    int err;

    if (!MLX5_CAP_GEN(dev->mdev, xrc))
        return ERR_PTR(-ENOSYS);

    xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
    if (!xrcd)
        return ERR_PTR(-ENOMEM);

    err = mlx5_core_xrcd_alloc(dev->mdev, &xrcd->xrcdn);
    if (err) {
        kfree(xrcd);
        return ERR_PTR(-ENOMEM);
    }

    return &xrcd->ibxrcd;
}

int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
    struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
    u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
    int err;

    err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn);
    if (err)
        mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);

    kfree(xrcd);

    return 0;
}

static void mlx5_ib_wq_event(struct mlx5_core_qp *core_qp, int type)
{
    struct mlx5_ib_rwq *rwq = to_mibrwq(core_qp);
    struct mlx5_ib_dev *dev = to_mdev(rwq->ibwq.device);
    struct ib_event event;

    if (rwq->ibwq.event_handler) {
        event.device     = rwq->ibwq.device;
        event.element.wq = &rwq->ibwq;
        switch (type) {
        case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
            event.event = IB_EVENT_WQ_FATAL;
            break;
        default:
            mlx5_ib_warn(dev, "Unexpected event type %d on WQ %06x\n", type, core_qp->qpn);
            return;
        }

        rwq->ibwq.event_handler(&event, rwq->ibwq.wq_context);
    }
}

static int set_delay_drop(struct mlx5_ib_dev *dev)
{
    int err = 0;

    mutex_lock(&dev->delay_drop.lock);
    if (dev->delay_drop.activate)
        goto out;

    err = mlx5_core_set_delay_drop(dev->mdev, dev->delay_drop.timeout);
    if (err)
        goto out;

    dev->delay_drop.activate = true;
out:
    mutex_unlock(&dev->delay_drop.lock);

    if (!err)
        atomic_inc(&dev->delay_drop.rqs_cnt);
    return err;
}

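/*
 * Translate the ib_wq_init_attr and the user-provided buffer into a
 * CREATE_RQ command, applying striding-RQ, VLAN stripping, scatter-FCS and
 * delay-drop options when the device advertises the matching capabilities.
 */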
static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
                     struct ib_wq_init_attr *init_attr)
{
    struct mlx5_ib_dev *dev;
    int has_net_offloads;
    __be64 *rq_pas0;
    void *in;
    void *rqc;
    void *wq;
    int inlen;
    int err;

    dev = to_mdev(pd->device);

    inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rwq->rq_num_pas;
    in = kvzalloc(inlen, GFP_KERNEL);
    if (!in)
        return -ENOMEM;

    rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
    MLX5_SET(rqc, rqc, mem_rq_type,
             MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
    MLX5_SET(rqc, rqc, user_index, rwq->user_index);
    MLX5_SET(rqc, rqc, cqn, to_mcq(init_attr->cq)->mcq.cqn);
    MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
    MLX5_SET(rqc, rqc, flush_in_error_en, 1);
    wq = MLX5_ADDR_OF(rqc, rqc, wq);
    MLX5_SET(wq, wq, wq_type,
             rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ ?
             MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ : MLX5_WQ_TYPE_CYCLIC);
    if (init_attr->create_flags & IB_WQ_FLAGS_PCI_WRITE_END_PADDING) {
        if (!MLX5_CAP_GEN(dev->mdev, end_pad)) {
            mlx5_ib_dbg(dev, "Scatter end padding is not supported\n");
            err = -EOPNOTSUPP;
            goto out;
        }
        MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
    }
    MLX5_SET(wq, wq, log_wq_stride, rwq->log_rq_stride);
    if (rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ) {
        MLX5_SET(wq, wq, two_byte_shift_en, rwq->two_byte_shift_en);
        MLX5_SET(wq, wq, log_wqe_stride_size,
                 rwq->single_stride_log_num_of_bytes -
                 MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES);
        MLX5_SET(wq, wq, log_wqe_num_of_strides, rwq->log_num_strides -
                 MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES);
    }
    MLX5_SET(wq, wq, log_wq_sz, rwq->log_rq_size);
    MLX5_SET(wq, wq, pd, to_mpd(pd)->pdn);
    MLX5_SET(wq, wq, page_offset, rwq->rq_page_offset);
    MLX5_SET(wq, wq, log_wq_pg_sz, rwq->log_page_size);
    MLX5_SET(wq, wq, wq_signature, rwq->wq_sig);
    MLX5_SET64(wq, wq, dbr_addr, rwq->db.dma);
    has_net_offloads = MLX5_CAP_GEN(dev->mdev, eth_net_offloads);
    if (init_attr->create_flags & IB_WQ_FLAGS_CVLAN_STRIPPING) {
        if (!(has_net_offloads && MLX5_CAP_ETH(dev->mdev, vlan_cap))) {
            mlx5_ib_dbg(dev, "VLAN offloads are not supported\n");
            err = -EOPNOTSUPP;
            goto out;
        }
    } else {
        MLX5_SET(rqc, rqc, vsd, 1);
    }
    if (init_attr->create_flags & IB_WQ_FLAGS_SCATTER_FCS) {
        if (!(has_net_offloads && MLX5_CAP_ETH(dev->mdev, scatter_fcs))) {
            mlx5_ib_dbg(dev, "Scatter FCS is not supported\n");
            err = -EOPNOTSUPP;
            goto out;
        }
        MLX5_SET(rqc, rqc, scatter_fcs, 1);
    }
    if (init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) {
        if (!(dev->ib_dev.attrs.raw_packet_caps &
              IB_RAW_PACKET_CAP_DELAY_DROP)) {
            mlx5_ib_dbg(dev, "Delay drop is not supported\n");
            err = -EOPNOTSUPP;
            goto out;
        }
        MLX5_SET(rqc, rqc, delay_drop_en, 1);
    }
    rq_pas0 = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
    mlx5_ib_populate_pas(dev, rwq->umem, rwq->page_shift, rq_pas0, 0);
    err = mlx5_core_create_rq_tracked(dev->mdev, in, inlen, &rwq->core_qp);
    if (!err && init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) {
        err = set_delay_drop(dev);
        if (err) {
            mlx5_ib_warn(dev, "Failed to enable delay drop err=%d\n",
                         err);
            mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
        } else {
            rwq->create_flags |= MLX5_IB_WQ_FLAGS_DELAY_DROP;
        }
    }
out:
    kvfree(in);
    return err;
}

static int set_user_rq_size(struct mlx5_ib_dev *dev,
                            struct ib_wq_init_attr *wq_init_attr,
                            struct mlx5_ib_create_wq *ucmd,
                            struct mlx5_ib_rwq *rwq)
{
    /* Sanity check RQ size before proceeding */
    if (wq_init_attr->max_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_wq_sz)))
        return -EINVAL;

    if (!ucmd->rq_wqe_count)
        return -EINVAL;

    rwq->wqe_count = ucmd->rq_wqe_count;
    rwq->wqe_shift = ucmd->rq_wqe_shift;
    if (check_shl_overflow(rwq->wqe_count, rwq->wqe_shift, &rwq->buf_size))
        return -EINVAL;

    rwq->log_rq_stride = rwq->wqe_shift;
    rwq->log_rq_size = ilog2(rwq->wqe_count);
    return 0;
}

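/* Copy and validate the user create-WQ command, then size and register the user RQ buffer. */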
static int prepare_user_rq(struct ib_pd *pd,
                           struct ib_wq_init_attr *init_attr,
                           struct ib_udata *udata,
                           struct mlx5_ib_rwq *rwq)
{
    struct mlx5_ib_dev *dev = to_mdev(pd->device);
    struct mlx5_ib_create_wq ucmd = {};
    int err;
    size_t required_cmd_sz;

    required_cmd_sz = offsetof(typeof(ucmd), single_stride_log_num_of_bytes)
        + sizeof(ucmd.single_stride_log_num_of_bytes);
    if (udata->inlen < required_cmd_sz) {
        mlx5_ib_dbg(dev, "invalid inlen\n");
        return -EINVAL;
    }

    if (udata->inlen > sizeof(ucmd) &&
        !ib_is_udata_cleared(udata, sizeof(ucmd),
                             udata->inlen - sizeof(ucmd))) {
        mlx5_ib_dbg(dev, "inlen is not supported\n");
        return -EOPNOTSUPP;
    }

    if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
        mlx5_ib_dbg(dev, "copy failed\n");
        return -EFAULT;
    }

    if (ucmd.comp_mask & (~MLX5_IB_CREATE_WQ_STRIDING_RQ)) {
        mlx5_ib_dbg(dev, "invalid comp mask\n");
        return -EOPNOTSUPP;
    } else if (ucmd.comp_mask & MLX5_IB_CREATE_WQ_STRIDING_RQ) {
        if (!MLX5_CAP_GEN(dev->mdev, striding_rq)) {
            mlx5_ib_dbg(dev, "Striding RQ is not supported\n");
            return -EOPNOTSUPP;
        }
        if ((ucmd.single_stride_log_num_of_bytes <
             MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES) ||
            (ucmd.single_stride_log_num_of_bytes >
             MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES)) {
            mlx5_ib_dbg(dev, "Invalid log stride size (%u. Range is %u - %u)\n",
                        ucmd.single_stride_log_num_of_bytes,
                        MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES,
                        MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES);
            return -EINVAL;
        }
        if ((ucmd.single_wqe_log_num_of_strides >
             MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES) ||
            (ucmd.single_wqe_log_num_of_strides <
             MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES)) {
            mlx5_ib_dbg(dev, "Invalid log num strides (%u. Range is %u - %u)\n",
                        ucmd.single_wqe_log_num_of_strides,
                        MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES,
                        MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES);
            return -EINVAL;
        }
        rwq->single_stride_log_num_of_bytes =
            ucmd.single_stride_log_num_of_bytes;
        rwq->log_num_strides = ucmd.single_wqe_log_num_of_strides;
        rwq->two_byte_shift_en = !!ucmd.two_byte_shift_en;
        rwq->create_flags |= MLX5_IB_WQ_FLAGS_STRIDING_RQ;
    }

    err = set_user_rq_size(dev, init_attr, &ucmd, rwq);
    if (err) {
        mlx5_ib_dbg(dev, "err %d\n", err);
        return err;
    }

    err = create_user_rq(dev, pd, rwq, &ucmd);
    if (err) {
        mlx5_ib_dbg(dev, "err %d\n", err);
        return err;
    }

    rwq->user_index = ucmd.user_index;
    return 0;
}

struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
                                struct ib_wq_init_attr *init_attr,
                                struct ib_udata *udata)
{
    struct mlx5_ib_dev *dev;
    struct mlx5_ib_rwq *rwq;
    struct mlx5_ib_create_wq_resp resp = {};
    size_t min_resp_len;
    int err;

    if (!udata)
        return ERR_PTR(-ENOSYS);

    min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
    if (udata->outlen && udata->outlen < min_resp_len)
        return ERR_PTR(-EINVAL);

    dev = to_mdev(pd->device);
    switch (init_attr->wq_type) {
    case IB_WQT_RQ:
        rwq = kzalloc(sizeof(*rwq), GFP_KERNEL);
        if (!rwq)
            return ERR_PTR(-ENOMEM);
        err = prepare_user_rq(pd, init_attr, udata, rwq);
        if (err)
            goto err;
        err = create_rq(rwq, pd, init_attr);
        if (err)
            goto err_user_rq;
        break;
    default:
        mlx5_ib_dbg(dev, "unsupported wq type %d\n",
                    init_attr->wq_type);
        return ERR_PTR(-EINVAL);
    }

    rwq->ibwq.wq_num = rwq->core_qp.qpn;
    rwq->ibwq.state = IB_WQS_RESET;
    if (udata->outlen) {
        resp.response_length = offsetof(typeof(resp), response_length) +
                               sizeof(resp.response_length);
        err = ib_copy_to_udata(udata, &resp, resp.response_length);
        if (err)
            goto err_copy;
    }

    rwq->core_qp.event = mlx5_ib_wq_event;
    rwq->ibwq.event_handler = init_attr->event_handler;
    return &rwq->ibwq;

err_copy:
    mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
err_user_rq:
    destroy_user_rq(dev, pd, rwq);
err:
    kfree(rwq);
    return ERR_PTR(err);
}

int mlx5_ib_destroy_wq(struct ib_wq *wq)
{
    struct mlx5_ib_dev *dev = to_mdev(wq->device);
    struct mlx5_ib_rwq *rwq = to_mrwq(wq);

    mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
    destroy_user_rq(dev, wq->pd, rwq);
    kfree(rwq);

    return 0;
}

struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
                                                      struct ib_rwq_ind_table_init_attr *init_attr,
                                                      struct ib_udata *udata)
{
    struct mlx5_ib_dev *dev = to_mdev(device);
    struct mlx5_ib_rwq_ind_table *rwq_ind_tbl;
    int sz = 1 << init_attr->log_ind_tbl_size;
    struct mlx5_ib_create_rwq_ind_tbl_resp resp = {};
    size_t min_resp_len;
    int inlen;
    int err;
    int i;
    u32 *in;
    void *rqtc;

    if (udata->inlen > 0 &&
        !ib_is_udata_cleared(udata, 0,
                             udata->inlen))
        return ERR_PTR(-EOPNOTSUPP);

    if (init_attr->log_ind_tbl_size >
        MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)) {
        mlx5_ib_dbg(dev, "log_ind_tbl_size = %d is bigger than supported = %d\n",
                    init_attr->log_ind_tbl_size,
                    MLX5_CAP_GEN(dev->mdev, log_max_rqt_size));
        return ERR_PTR(-EINVAL);
    }

    min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
    if (udata->outlen && udata->outlen < min_resp_len)
        return ERR_PTR(-EINVAL);

    rwq_ind_tbl = kzalloc(sizeof(*rwq_ind_tbl), GFP_KERNEL);
    if (!rwq_ind_tbl)
        return ERR_PTR(-ENOMEM);

    inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
    in = kvzalloc(inlen, GFP_KERNEL);
    if (!in) {
        err = -ENOMEM;
        goto err;
    }

    rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

    MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
    MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

    for (i = 0; i < sz; i++)
        MLX5_SET(rqtc, rqtc, rq_num[i], init_attr->ind_tbl[i]->wq_num);

    err = mlx5_core_create_rqt(dev->mdev, in, inlen, &rwq_ind_tbl->rqtn);
    kvfree(in);

    if (err)
        goto err;

    rwq_ind_tbl->ib_rwq_ind_tbl.ind_tbl_num = rwq_ind_tbl->rqtn;
    if (udata->outlen) {
        resp.response_length = offsetof(typeof(resp), response_length) +
                               sizeof(resp.response_length);
        err = ib_copy_to_udata(udata, &resp, resp.response_length);
        if (err)
            goto err_copy;
    }

    return &rwq_ind_tbl->ib_rwq_ind_tbl;

err_copy:
    mlx5_core_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn);
err:
    kfree(rwq_ind_tbl);
    return ERR_PTR(err);
}

int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
    struct mlx5_ib_rwq_ind_table *rwq_ind_tbl = to_mrwq_ind_table(ib_rwq_ind_tbl);
    struct mlx5_ib_dev *dev = to_mdev(ib_rwq_ind_tbl->device);

    mlx5_core_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn);

    kfree(rwq_ind_tbl);
    return 0;
}

int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
                      u32 wq_attr_mask, struct ib_udata *udata)
{
    struct mlx5_ib_dev *dev = to_mdev(wq->device);
    struct mlx5_ib_rwq *rwq = to_mrwq(wq);
    struct mlx5_ib_modify_wq ucmd = {};
    size_t required_cmd_sz;
    int curr_wq_state;
    int wq_state;
    int inlen;
    int err;
    void *rqc;
    void *in;

    required_cmd_sz = offsetof(typeof(ucmd), reserved) + sizeof(ucmd.reserved);
    if (udata->inlen < required_cmd_sz)
        return -EINVAL;

    if (udata->inlen > sizeof(ucmd) &&
        !ib_is_udata_cleared(udata, sizeof(ucmd),
                             udata->inlen - sizeof(ucmd)))
        return -EOPNOTSUPP;

    if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)))
        return -EFAULT;

    if (ucmd.comp_mask || ucmd.reserved)
        return -EOPNOTSUPP;

    inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
    in = kvzalloc(inlen, GFP_KERNEL);
    if (!in)
        return -ENOMEM;

    rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

    curr_wq_state = (wq_attr_mask & IB_WQ_CUR_STATE) ?
        wq_attr->curr_wq_state : wq->state;
    wq_state = (wq_attr_mask & IB_WQ_STATE) ?
        wq_attr->wq_state : curr_wq_state;
    if (curr_wq_state == IB_WQS_ERR)
        curr_wq_state = MLX5_RQC_STATE_ERR;
    if (wq_state == IB_WQS_ERR)
        wq_state = MLX5_RQC_STATE_ERR;
    MLX5_SET(modify_rq_in, in, rq_state, curr_wq_state);
    MLX5_SET(rqc, rqc, state, wq_state);

    if (wq_attr_mask & IB_WQ_FLAGS) {
        if (wq_attr->flags_mask & IB_WQ_FLAGS_CVLAN_STRIPPING) {
            if (!(MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
                  MLX5_CAP_ETH(dev->mdev, vlan_cap))) {
                mlx5_ib_dbg(dev, "VLAN offloads are not "
                            "supported\n");
                err = -EOPNOTSUPP;
                goto out;
            }
            MLX5_SET64(modify_rq_in, in, modify_bitmask,
                       MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
            MLX5_SET(rqc, rqc, vsd,
                     (wq_attr->flags & IB_WQ_FLAGS_CVLAN_STRIPPING) ? 0 : 1);
        }

        if (wq_attr->flags_mask & IB_WQ_FLAGS_PCI_WRITE_END_PADDING) {
            mlx5_ib_dbg(dev, "Modifying scatter end padding is not supported\n");
            err = -EOPNOTSUPP;
            goto out;
        }
    }

    if (curr_wq_state == IB_WQS_RESET && wq_state == IB_WQS_RDY) {
        if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) {
            MLX5_SET64(modify_rq_in, in, modify_bitmask,
                       MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
            MLX5_SET(rqc, rqc, counter_set_id,
                     dev->port->cnts.set_id);
        } else {
            pr_info_once("%s: Receive WQ counters are not supported on current FW\n",
                         dev->ib_dev.name);
        }
    }

    err = mlx5_core_modify_rq(dev->mdev, rwq->core_qp.qpn, in, inlen);
    if (!err)
        rwq->ibwq.state = (wq_state == MLX5_RQC_STATE_ERR) ? IB_WQS_ERR : wq_state;

out:
    kvfree(in);
    return err;
}

struct mlx5_ib_drain_cqe {
    struct ib_cqe cqe;
    struct completion done;
};

static void mlx5_ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
{
    struct mlx5_ib_drain_cqe *cqe = container_of(wc->wr_cqe,
                                                 struct mlx5_ib_drain_cqe,
                                                 cqe);

    complete(&cqe->done);
}

/* This function returns only once the drained WR was completed */
static void handle_drain_completion(struct ib_cq *cq,
                                    struct mlx5_ib_drain_cqe *sdrain,
                                    struct mlx5_ib_dev *dev)
{
    struct mlx5_core_dev *mdev = dev->mdev;

    if (cq->poll_ctx == IB_POLL_DIRECT) {
        while (wait_for_completion_timeout(&sdrain->done, HZ / 10) <= 0)
            ib_process_cq_direct(cq, -1);
        return;
    }

    if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
        struct mlx5_ib_cq *mcq = to_mcq(cq);
        bool triggered = false;
        unsigned long flags;

        spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
        /* Make sure that the CQ handler won't run if wasn't run yet */
        if (!mcq->mcq.reset_notify_added)
            mcq->mcq.reset_notify_added = 1;
        else
            triggered = true;
        spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);

        if (triggered) {
            /* Wait for any scheduled/running task to be ended */
            switch (cq->poll_ctx) {
            case IB_POLL_SOFTIRQ:
                irq_poll_disable(&cq->iop);
                irq_poll_enable(&cq->iop);
                break;
            case IB_POLL_WORKQUEUE:
                cancel_work_sync(&cq->work);
                break;
            default:
                WARN_ON_ONCE(1);
            }
        } else {
            /* Run the CQ handler - this makes sure that the drain WR will
             * be processed if wasn't processed yet.
             */
            mcq->mcq.comp(&mcq->mcq);
        }
    }

    wait_for_completion(&sdrain->done);
}

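/*
 * Drain the SQ: move the QP to the error state and post one final marker
 * write WR; once its completion is observed, every WR posted before it
 * has been flushed.
 */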
void mlx5_ib_drain_sq(struct ib_qp *qp)
{
    struct ib_cq *cq = qp->send_cq;
    struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
    struct mlx5_ib_drain_cqe sdrain;
    const struct ib_send_wr *bad_swr;
    struct ib_rdma_wr swr = {
        .wr = {
            .next = NULL,
            { .wr_cqe = &sdrain.cqe, },
            .opcode = IB_WR_RDMA_WRITE,
        },
    };
    int ret;
    struct mlx5_ib_dev *dev = to_mdev(qp->device);
    struct mlx5_core_dev *mdev = dev->mdev;

    ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
    if (ret && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
        WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
        return;
    }

    sdrain.cqe.done = mlx5_ib_drain_qp_done;
    init_completion(&sdrain.done);

    ret = _mlx5_ib_post_send(qp, &swr.wr, &bad_swr, true);
    if (ret) {
        WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
        return;
    }

    handle_drain_completion(cq, &sdrain, dev);
}

void mlx5_ib_drain_rq(struct ib_qp *qp)
{
    struct ib_cq *cq = qp->recv_cq;
    struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
    struct mlx5_ib_drain_cqe rdrain;
    struct ib_recv_wr rwr = {};
    const struct ib_recv_wr *bad_rwr;
    int ret;
    struct mlx5_ib_dev *dev = to_mdev(qp->device);
    struct mlx5_core_dev *mdev = dev->mdev;

    ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
    if (ret && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
        WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
        return;
    }

    rwr.wr_cqe = &rdrain.cqe;
    rdrain.cqe.done = mlx5_ib_drain_qp_done;
    init_completion(&rdrain.done);

    ret = _mlx5_ib_post_recv(qp, &rwr, &bad_rwr, true);
    if (ret) {
        WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
        return;
    }

    handle_drain_completion(cq, &rdrain, dev);
}