2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #include <linux/module.h>
34 #include <rdma/ib_umem.h>
35 #include <rdma/ib_cache.h>
36 #include <rdma/ib_user_verbs.h>
37 #include <linux/mlx5/fs.h>
40 /* not supported currently */
41 static int wq_signature
;
44 MLX5_IB_ACK_REQ_FREQ
= 8,
48 MLX5_IB_DEFAULT_SCHED_QUEUE
= 0x83,
49 MLX5_IB_DEFAULT_QP0_SCHED_QUEUE
= 0x3f,
50 MLX5_IB_LINK_TYPE_IB
= 0,
51 MLX5_IB_LINK_TYPE_ETH
= 1
55 MLX5_IB_SQ_STRIDE
= 6,
58 static const u32 mlx5_ib_opcode
[] = {
59 [IB_WR_SEND
] = MLX5_OPCODE_SEND
,
60 [IB_WR_LSO
] = MLX5_OPCODE_LSO
,
61 [IB_WR_SEND_WITH_IMM
] = MLX5_OPCODE_SEND_IMM
,
62 [IB_WR_RDMA_WRITE
] = MLX5_OPCODE_RDMA_WRITE
,
63 [IB_WR_RDMA_WRITE_WITH_IMM
] = MLX5_OPCODE_RDMA_WRITE_IMM
,
64 [IB_WR_RDMA_READ
] = MLX5_OPCODE_RDMA_READ
,
65 [IB_WR_ATOMIC_CMP_AND_SWP
] = MLX5_OPCODE_ATOMIC_CS
,
66 [IB_WR_ATOMIC_FETCH_AND_ADD
] = MLX5_OPCODE_ATOMIC_FA
,
67 [IB_WR_SEND_WITH_INV
] = MLX5_OPCODE_SEND_INVAL
,
68 [IB_WR_LOCAL_INV
] = MLX5_OPCODE_UMR
,
69 [IB_WR_REG_MR
] = MLX5_OPCODE_UMR
,
70 [IB_WR_MASKED_ATOMIC_CMP_AND_SWP
] = MLX5_OPCODE_ATOMIC_MASKED_CS
,
71 [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD
] = MLX5_OPCODE_ATOMIC_MASKED_FA
,
72 [MLX5_IB_WR_UMR
] = MLX5_OPCODE_UMR
,
75 struct mlx5_wqe_eth_pad
{
79 enum raw_qp_set_mask_map
{
80 MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID
= 1UL << 0,
81 MLX5_RAW_QP_RATE_LIMIT
= 1UL << 1,
84 struct mlx5_modify_raw_qp_param
{
87 u32 set_mask
; /* raw_qp_set_mask_map */
92 static void get_cqs(enum ib_qp_type qp_type
,
93 struct ib_cq
*ib_send_cq
, struct ib_cq
*ib_recv_cq
,
94 struct mlx5_ib_cq
**send_cq
, struct mlx5_ib_cq
**recv_cq
);
96 static int is_qp0(enum ib_qp_type qp_type
)
98 return qp_type
== IB_QPT_SMI
;
101 static int is_sqp(enum ib_qp_type qp_type
)
103 return is_qp0(qp_type
) || is_qp1(qp_type
);
106 static void *get_wqe(struct mlx5_ib_qp
*qp
, int offset
)
108 return mlx5_buf_offset(&qp
->buf
, offset
);
111 static void *get_recv_wqe(struct mlx5_ib_qp
*qp
, int n
)
113 return get_wqe(qp
, qp
->rq
.offset
+ (n
<< qp
->rq
.wqe_shift
));
116 void *mlx5_get_send_wqe(struct mlx5_ib_qp
*qp
, int n
)
118 return get_wqe(qp
, qp
->sq
.offset
+ (n
<< MLX5_IB_SQ_STRIDE
));
122 * mlx5_ib_read_user_wqe() - Copy a user-space WQE to kernel space.
124 * @qp: QP to copy from.
125 * @send: copy from the send queue when non-zero, use the receive queue
127 * @wqe_index: index to start copying from. For send work queues, the
128 * wqe_index is in units of MLX5_SEND_WQE_BB.
129 * For receive work queue, it is the number of work queue
130 * element in the queue.
131 * @buffer: destination buffer.
132 * @length: maximum number of bytes to copy.
134 * Copies at least a single WQE, but may copy more data.
136 * Return: the number of bytes copied, or an error code.
138 int mlx5_ib_read_user_wqe(struct mlx5_ib_qp
*qp
, int send
, int wqe_index
,
139 void *buffer
, u32 length
,
140 struct mlx5_ib_qp_base
*base
)
142 struct ib_device
*ibdev
= qp
->ibqp
.device
;
143 struct mlx5_ib_dev
*dev
= to_mdev(ibdev
);
144 struct mlx5_ib_wq
*wq
= send
? &qp
->sq
: &qp
->rq
;
147 struct ib_umem
*umem
= base
->ubuffer
.umem
;
148 u32 first_copy_length
;
152 if (wq
->wqe_cnt
== 0) {
153 mlx5_ib_dbg(dev
, "mlx5_ib_read_user_wqe for a QP with wqe_cnt == 0. qp_type: 0x%x\n",
158 offset
= wq
->offset
+ ((wqe_index
% wq
->wqe_cnt
) << wq
->wqe_shift
);
159 wq_end
= wq
->offset
+ (wq
->wqe_cnt
<< wq
->wqe_shift
);
161 if (send
&& length
< sizeof(struct mlx5_wqe_ctrl_seg
))
164 if (offset
> umem
->length
||
165 (send
&& offset
+ sizeof(struct mlx5_wqe_ctrl_seg
) > umem
->length
))
168 first_copy_length
= min_t(u32
, offset
+ length
, wq_end
) - offset
;
169 ret
= ib_umem_copy_from(buffer
, umem
, offset
, first_copy_length
);
174 struct mlx5_wqe_ctrl_seg
*ctrl
= buffer
;
175 int ds
= be32_to_cpu(ctrl
->qpn_ds
) & MLX5_WQE_CTRL_DS_MASK
;
177 wqe_length
= ds
* MLX5_WQE_DS_UNITS
;
179 wqe_length
= 1 << wq
->wqe_shift
;
182 if (wqe_length
<= first_copy_length
)
183 return first_copy_length
;
185 ret
= ib_umem_copy_from(buffer
+ first_copy_length
, umem
, wq
->offset
,
186 wqe_length
- first_copy_length
);
193 static void mlx5_ib_qp_event(struct mlx5_core_qp
*qp
, int type
)
195 struct ib_qp
*ibqp
= &to_mibqp(qp
)->ibqp
;
196 struct ib_event event
;
198 if (type
== MLX5_EVENT_TYPE_PATH_MIG
) {
199 /* This event is only valid for trans_qps */
200 to_mibqp(qp
)->port
= to_mibqp(qp
)->trans_qp
.alt_port
;
203 if (ibqp
->event_handler
) {
204 event
.device
= ibqp
->device
;
205 event
.element
.qp
= ibqp
;
207 case MLX5_EVENT_TYPE_PATH_MIG
:
208 event
.event
= IB_EVENT_PATH_MIG
;
210 case MLX5_EVENT_TYPE_COMM_EST
:
211 event
.event
= IB_EVENT_COMM_EST
;
213 case MLX5_EVENT_TYPE_SQ_DRAINED
:
214 event
.event
= IB_EVENT_SQ_DRAINED
;
216 case MLX5_EVENT_TYPE_SRQ_LAST_WQE
:
217 event
.event
= IB_EVENT_QP_LAST_WQE_REACHED
;
219 case MLX5_EVENT_TYPE_WQ_CATAS_ERROR
:
220 event
.event
= IB_EVENT_QP_FATAL
;
222 case MLX5_EVENT_TYPE_PATH_MIG_FAILED
:
223 event
.event
= IB_EVENT_PATH_MIG_ERR
;
225 case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR
:
226 event
.event
= IB_EVENT_QP_REQ_ERR
;
228 case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR
:
229 event
.event
= IB_EVENT_QP_ACCESS_ERR
;
232 pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type
, qp
->qpn
);
236 ibqp
->event_handler(&event
, ibqp
->qp_context
);
240 static int set_rq_size(struct mlx5_ib_dev
*dev
, struct ib_qp_cap
*cap
,
241 int has_rq
, struct mlx5_ib_qp
*qp
, struct mlx5_ib_create_qp
*ucmd
)
246 /* Sanity check RQ size before proceeding */
247 if (cap
->max_recv_wr
> (1 << MLX5_CAP_GEN(dev
->mdev
, log_max_qp_sz
)))
253 qp
->rq
.wqe_shift
= 0;
254 cap
->max_recv_wr
= 0;
255 cap
->max_recv_sge
= 0;
258 qp
->rq
.wqe_cnt
= ucmd
->rq_wqe_count
;
259 qp
->rq
.wqe_shift
= ucmd
->rq_wqe_shift
;
260 qp
->rq
.max_gs
= (1 << qp
->rq
.wqe_shift
) / sizeof(struct mlx5_wqe_data_seg
) - qp
->wq_sig
;
261 qp
->rq
.max_post
= qp
->rq
.wqe_cnt
;
263 wqe_size
= qp
->wq_sig
? sizeof(struct mlx5_wqe_signature_seg
) : 0;
264 wqe_size
+= cap
->max_recv_sge
* sizeof(struct mlx5_wqe_data_seg
);
265 wqe_size
= roundup_pow_of_two(wqe_size
);
266 wq_size
= roundup_pow_of_two(cap
->max_recv_wr
) * wqe_size
;
267 wq_size
= max_t(int, wq_size
, MLX5_SEND_WQE_BB
);
268 qp
->rq
.wqe_cnt
= wq_size
/ wqe_size
;
269 if (wqe_size
> MLX5_CAP_GEN(dev
->mdev
, max_wqe_sz_rq
)) {
270 mlx5_ib_dbg(dev
, "wqe_size %d, max %d\n",
272 MLX5_CAP_GEN(dev
->mdev
,
276 qp
->rq
.wqe_shift
= ilog2(wqe_size
);
277 qp
->rq
.max_gs
= (1 << qp
->rq
.wqe_shift
) / sizeof(struct mlx5_wqe_data_seg
) - qp
->wq_sig
;
278 qp
->rq
.max_post
= qp
->rq
.wqe_cnt
;
285 static int sq_overhead(struct ib_qp_init_attr
*attr
)
289 switch (attr
->qp_type
) {
291 size
+= sizeof(struct mlx5_wqe_xrc_seg
);
294 size
+= sizeof(struct mlx5_wqe_ctrl_seg
) +
295 max(sizeof(struct mlx5_wqe_atomic_seg
) +
296 sizeof(struct mlx5_wqe_raddr_seg
),
297 sizeof(struct mlx5_wqe_umr_ctrl_seg
) +
298 sizeof(struct mlx5_mkey_seg
));
305 size
+= sizeof(struct mlx5_wqe_ctrl_seg
) +
306 max(sizeof(struct mlx5_wqe_raddr_seg
),
307 sizeof(struct mlx5_wqe_umr_ctrl_seg
) +
308 sizeof(struct mlx5_mkey_seg
));
312 if (attr
->create_flags
& IB_QP_CREATE_IPOIB_UD_LSO
)
313 size
+= sizeof(struct mlx5_wqe_eth_pad
) +
314 sizeof(struct mlx5_wqe_eth_seg
);
317 case MLX5_IB_QPT_HW_GSI
:
318 size
+= sizeof(struct mlx5_wqe_ctrl_seg
) +
319 sizeof(struct mlx5_wqe_datagram_seg
);
322 case MLX5_IB_QPT_REG_UMR
:
323 size
+= sizeof(struct mlx5_wqe_ctrl_seg
) +
324 sizeof(struct mlx5_wqe_umr_ctrl_seg
) +
325 sizeof(struct mlx5_mkey_seg
);
335 static int calc_send_wqe(struct ib_qp_init_attr
*attr
)
340 size
= sq_overhead(attr
);
344 if (attr
->cap
.max_inline_data
) {
345 inl_size
= size
+ sizeof(struct mlx5_wqe_inline_seg
) +
346 attr
->cap
.max_inline_data
;
349 size
+= attr
->cap
.max_send_sge
* sizeof(struct mlx5_wqe_data_seg
);
350 if (attr
->create_flags
& IB_QP_CREATE_SIGNATURE_EN
&&
351 ALIGN(max_t(int, inl_size
, size
), MLX5_SEND_WQE_BB
) < MLX5_SIG_WQE_SIZE
)
352 return MLX5_SIG_WQE_SIZE
;
354 return ALIGN(max_t(int, inl_size
, size
), MLX5_SEND_WQE_BB
);
357 static int get_send_sge(struct ib_qp_init_attr
*attr
, int wqe_size
)
361 if (attr
->qp_type
== IB_QPT_RC
)
362 max_sge
= (min_t(int, wqe_size
, 512) -
363 sizeof(struct mlx5_wqe_ctrl_seg
) -
364 sizeof(struct mlx5_wqe_raddr_seg
)) /
365 sizeof(struct mlx5_wqe_data_seg
);
366 else if (attr
->qp_type
== IB_QPT_XRC_INI
)
367 max_sge
= (min_t(int, wqe_size
, 512) -
368 sizeof(struct mlx5_wqe_ctrl_seg
) -
369 sizeof(struct mlx5_wqe_xrc_seg
) -
370 sizeof(struct mlx5_wqe_raddr_seg
)) /
371 sizeof(struct mlx5_wqe_data_seg
);
373 max_sge
= (wqe_size
- sq_overhead(attr
)) /
374 sizeof(struct mlx5_wqe_data_seg
);
376 return min_t(int, max_sge
, wqe_size
- sq_overhead(attr
) /
377 sizeof(struct mlx5_wqe_data_seg
));
380 static int calc_sq_size(struct mlx5_ib_dev
*dev
, struct ib_qp_init_attr
*attr
,
381 struct mlx5_ib_qp
*qp
)
386 if (!attr
->cap
.max_send_wr
)
389 wqe_size
= calc_send_wqe(attr
);
390 mlx5_ib_dbg(dev
, "wqe_size %d\n", wqe_size
);
394 if (wqe_size
> MLX5_CAP_GEN(dev
->mdev
, max_wqe_sz_sq
)) {
395 mlx5_ib_dbg(dev
, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
396 wqe_size
, MLX5_CAP_GEN(dev
->mdev
, max_wqe_sz_sq
));
400 qp
->max_inline_data
= wqe_size
- sq_overhead(attr
) -
401 sizeof(struct mlx5_wqe_inline_seg
);
402 attr
->cap
.max_inline_data
= qp
->max_inline_data
;
404 if (attr
->create_flags
& IB_QP_CREATE_SIGNATURE_EN
)
405 qp
->signature_en
= true;
407 wq_size
= roundup_pow_of_two(attr
->cap
.max_send_wr
* wqe_size
);
408 qp
->sq
.wqe_cnt
= wq_size
/ MLX5_SEND_WQE_BB
;
409 if (qp
->sq
.wqe_cnt
> (1 << MLX5_CAP_GEN(dev
->mdev
, log_max_qp_sz
))) {
410 mlx5_ib_dbg(dev
, "send queue size (%d * %d / %d -> %d) exceeds limits(%d)\n",
411 attr
->cap
.max_send_wr
, wqe_size
, MLX5_SEND_WQE_BB
,
413 1 << MLX5_CAP_GEN(dev
->mdev
, log_max_qp_sz
));
416 qp
->sq
.wqe_shift
= ilog2(MLX5_SEND_WQE_BB
);
417 qp
->sq
.max_gs
= get_send_sge(attr
, wqe_size
);
418 if (qp
->sq
.max_gs
< attr
->cap
.max_send_sge
)
421 attr
->cap
.max_send_sge
= qp
->sq
.max_gs
;
422 qp
->sq
.max_post
= wq_size
/ wqe_size
;
423 attr
->cap
.max_send_wr
= qp
->sq
.max_post
;
428 static int set_user_buf_size(struct mlx5_ib_dev
*dev
,
429 struct mlx5_ib_qp
*qp
,
430 struct mlx5_ib_create_qp
*ucmd
,
431 struct mlx5_ib_qp_base
*base
,
432 struct ib_qp_init_attr
*attr
)
434 int desc_sz
= 1 << qp
->sq
.wqe_shift
;
436 if (desc_sz
> MLX5_CAP_GEN(dev
->mdev
, max_wqe_sz_sq
)) {
437 mlx5_ib_warn(dev
, "desc_sz %d, max_sq_desc_sz %d\n",
438 desc_sz
, MLX5_CAP_GEN(dev
->mdev
, max_wqe_sz_sq
));
442 if (ucmd
->sq_wqe_count
&& ((1 << ilog2(ucmd
->sq_wqe_count
)) != ucmd
->sq_wqe_count
)) {
443 mlx5_ib_warn(dev
, "sq_wqe_count %d, sq_wqe_count %d\n",
444 ucmd
->sq_wqe_count
, ucmd
->sq_wqe_count
);
448 qp
->sq
.wqe_cnt
= ucmd
->sq_wqe_count
;
450 if (qp
->sq
.wqe_cnt
> (1 << MLX5_CAP_GEN(dev
->mdev
, log_max_qp_sz
))) {
451 mlx5_ib_warn(dev
, "wqe_cnt %d, max_wqes %d\n",
453 1 << MLX5_CAP_GEN(dev
->mdev
, log_max_qp_sz
));
457 if (attr
->qp_type
== IB_QPT_RAW_PACKET
||
458 qp
->flags
& MLX5_IB_QP_UNDERLAY
) {
459 base
->ubuffer
.buf_size
= qp
->rq
.wqe_cnt
<< qp
->rq
.wqe_shift
;
460 qp
->raw_packet_qp
.sq
.ubuffer
.buf_size
= qp
->sq
.wqe_cnt
<< 6;
462 base
->ubuffer
.buf_size
= (qp
->rq
.wqe_cnt
<< qp
->rq
.wqe_shift
) +
463 (qp
->sq
.wqe_cnt
<< 6);
469 static int qp_has_rq(struct ib_qp_init_attr
*attr
)
471 if (attr
->qp_type
== IB_QPT_XRC_INI
||
472 attr
->qp_type
== IB_QPT_XRC_TGT
|| attr
->srq
||
473 attr
->qp_type
== MLX5_IB_QPT_REG_UMR
||
474 !attr
->cap
.max_recv_wr
)
480 static int first_med_bfreg(void)
486 /* this is the first blue flame register in the array of bfregs assigned
487 * to a processes. Since we do not use it for blue flame but rather
488 * regular 64 bit doorbells, we do not need a lock for maintaiing
491 NUM_NON_BLUE_FLAME_BFREGS
= 1,
494 static int max_bfregs(struct mlx5_ib_dev
*dev
, struct mlx5_bfreg_info
*bfregi
)
496 return get_num_uars(dev
, bfregi
) * MLX5_NON_FP_BFREGS_PER_UAR
;
499 static int num_med_bfreg(struct mlx5_ib_dev
*dev
,
500 struct mlx5_bfreg_info
*bfregi
)
504 n
= max_bfregs(dev
, bfregi
) - bfregi
->num_low_latency_bfregs
-
505 NUM_NON_BLUE_FLAME_BFREGS
;
507 return n
>= 0 ? n
: 0;
510 static int first_hi_bfreg(struct mlx5_ib_dev
*dev
,
511 struct mlx5_bfreg_info
*bfregi
)
515 med
= num_med_bfreg(dev
, bfregi
);
519 static int alloc_high_class_bfreg(struct mlx5_ib_dev
*dev
,
520 struct mlx5_bfreg_info
*bfregi
)
524 for (i
= first_hi_bfreg(dev
, bfregi
); i
< max_bfregs(dev
, bfregi
); i
++) {
525 if (!bfregi
->count
[i
]) {
534 static int alloc_med_class_bfreg(struct mlx5_ib_dev
*dev
,
535 struct mlx5_bfreg_info
*bfregi
)
537 int minidx
= first_med_bfreg();
540 for (i
= first_med_bfreg(); i
< first_hi_bfreg(dev
, bfregi
); i
++) {
541 if (bfregi
->count
[i
] < bfregi
->count
[minidx
])
543 if (!bfregi
->count
[minidx
])
547 bfregi
->count
[minidx
]++;
551 static int alloc_bfreg(struct mlx5_ib_dev
*dev
,
552 struct mlx5_bfreg_info
*bfregi
,
553 enum mlx5_ib_latency_class lat
)
555 int bfregn
= -EINVAL
;
557 mutex_lock(&bfregi
->lock
);
559 case MLX5_IB_LATENCY_CLASS_LOW
:
560 BUILD_BUG_ON(NUM_NON_BLUE_FLAME_BFREGS
!= 1);
562 bfregi
->count
[bfregn
]++;
565 case MLX5_IB_LATENCY_CLASS_MEDIUM
:
569 bfregn
= alloc_med_class_bfreg(dev
, bfregi
);
572 case MLX5_IB_LATENCY_CLASS_HIGH
:
576 bfregn
= alloc_high_class_bfreg(dev
, bfregi
);
579 mutex_unlock(&bfregi
->lock
);
584 static void free_bfreg(struct mlx5_ib_dev
*dev
, struct mlx5_bfreg_info
*bfregi
, int bfregn
)
586 mutex_lock(&bfregi
->lock
);
587 bfregi
->count
[bfregn
]--;
588 mutex_unlock(&bfregi
->lock
);
591 static enum mlx5_qp_state
to_mlx5_state(enum ib_qp_state state
)
594 case IB_QPS_RESET
: return MLX5_QP_STATE_RST
;
595 case IB_QPS_INIT
: return MLX5_QP_STATE_INIT
;
596 case IB_QPS_RTR
: return MLX5_QP_STATE_RTR
;
597 case IB_QPS_RTS
: return MLX5_QP_STATE_RTS
;
598 case IB_QPS_SQD
: return MLX5_QP_STATE_SQD
;
599 case IB_QPS_SQE
: return MLX5_QP_STATE_SQER
;
600 case IB_QPS_ERR
: return MLX5_QP_STATE_ERR
;
605 static int to_mlx5_st(enum ib_qp_type type
)
608 case IB_QPT_RC
: return MLX5_QP_ST_RC
;
609 case IB_QPT_UC
: return MLX5_QP_ST_UC
;
610 case IB_QPT_UD
: return MLX5_QP_ST_UD
;
611 case MLX5_IB_QPT_REG_UMR
: return MLX5_QP_ST_REG_UMR
;
613 case IB_QPT_XRC_TGT
: return MLX5_QP_ST_XRC
;
614 case IB_QPT_SMI
: return MLX5_QP_ST_QP0
;
615 case MLX5_IB_QPT_HW_GSI
: return MLX5_QP_ST_QP1
;
616 case IB_QPT_RAW_IPV6
: return MLX5_QP_ST_RAW_IPV6
;
617 case IB_QPT_RAW_PACKET
:
618 case IB_QPT_RAW_ETHERTYPE
: return MLX5_QP_ST_RAW_ETHERTYPE
;
620 default: return -EINVAL
;
624 static void mlx5_ib_lock_cqs(struct mlx5_ib_cq
*send_cq
,
625 struct mlx5_ib_cq
*recv_cq
);
626 static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq
*send_cq
,
627 struct mlx5_ib_cq
*recv_cq
);
629 static int bfregn_to_uar_index(struct mlx5_ib_dev
*dev
,
630 struct mlx5_bfreg_info
*bfregi
, int bfregn
)
632 int bfregs_per_sys_page
;
633 int index_of_sys_page
;
636 bfregs_per_sys_page
= get_uars_per_sys_page(dev
, bfregi
->lib_uar_4k
) *
637 MLX5_NON_FP_BFREGS_PER_UAR
;
638 index_of_sys_page
= bfregn
/ bfregs_per_sys_page
;
640 offset
= bfregn
% bfregs_per_sys_page
/ MLX5_NON_FP_BFREGS_PER_UAR
;
642 return bfregi
->sys_pages
[index_of_sys_page
] + offset
;
645 static int mlx5_ib_umem_get(struct mlx5_ib_dev
*dev
,
647 unsigned long addr
, size_t size
,
648 struct ib_umem
**umem
,
649 int *npages
, int *page_shift
, int *ncont
,
654 *umem
= ib_umem_get(pd
->uobject
->context
, addr
, size
, 0, 0);
656 mlx5_ib_dbg(dev
, "umem_get failed\n");
657 return PTR_ERR(*umem
);
660 mlx5_ib_cont_pages(*umem
, addr
, 0, npages
, page_shift
, ncont
, NULL
);
662 err
= mlx5_ib_get_buf_offset(addr
, *page_shift
, offset
);
664 mlx5_ib_warn(dev
, "bad offset\n");
668 mlx5_ib_dbg(dev
, "addr 0x%lx, size %zu, npages %d, page_shift %d, ncont %d, offset %d\n",
669 addr
, size
, *npages
, *page_shift
, *ncont
, *offset
);
674 ib_umem_release(*umem
);
680 static void destroy_user_rq(struct mlx5_ib_dev
*dev
, struct ib_pd
*pd
,
681 struct mlx5_ib_rwq
*rwq
)
683 struct mlx5_ib_ucontext
*context
;
685 if (rwq
->create_flags
& MLX5_IB_WQ_FLAGS_DELAY_DROP
)
686 atomic_dec(&dev
->delay_drop
.rqs_cnt
);
688 context
= to_mucontext(pd
->uobject
->context
);
689 mlx5_ib_db_unmap_user(context
, &rwq
->db
);
691 ib_umem_release(rwq
->umem
);
694 static int create_user_rq(struct mlx5_ib_dev
*dev
, struct ib_pd
*pd
,
695 struct mlx5_ib_rwq
*rwq
,
696 struct mlx5_ib_create_wq
*ucmd
)
698 struct mlx5_ib_ucontext
*context
;
708 context
= to_mucontext(pd
->uobject
->context
);
709 rwq
->umem
= ib_umem_get(pd
->uobject
->context
, ucmd
->buf_addr
,
710 rwq
->buf_size
, 0, 0);
711 if (IS_ERR(rwq
->umem
)) {
712 mlx5_ib_dbg(dev
, "umem_get failed\n");
713 err
= PTR_ERR(rwq
->umem
);
717 mlx5_ib_cont_pages(rwq
->umem
, ucmd
->buf_addr
, 0, &npages
, &page_shift
,
719 err
= mlx5_ib_get_buf_offset(ucmd
->buf_addr
, page_shift
,
720 &rwq
->rq_page_offset
);
722 mlx5_ib_warn(dev
, "bad offset\n");
726 rwq
->rq_num_pas
= ncont
;
727 rwq
->page_shift
= page_shift
;
728 rwq
->log_page_size
= page_shift
- MLX5_ADAPTER_PAGE_SHIFT
;
729 rwq
->wq_sig
= !!(ucmd
->flags
& MLX5_WQ_FLAG_SIGNATURE
);
731 mlx5_ib_dbg(dev
, "addr 0x%llx, size %zd, npages %d, page_shift %d, ncont %d, offset %d\n",
732 (unsigned long long)ucmd
->buf_addr
, rwq
->buf_size
,
733 npages
, page_shift
, ncont
, offset
);
735 err
= mlx5_ib_db_map_user(context
, ucmd
->db_addr
, &rwq
->db
);
737 mlx5_ib_dbg(dev
, "map failed\n");
741 rwq
->create_type
= MLX5_WQ_USER
;
745 ib_umem_release(rwq
->umem
);
749 static int adjust_bfregn(struct mlx5_ib_dev
*dev
,
750 struct mlx5_bfreg_info
*bfregi
, int bfregn
)
752 return bfregn
/ MLX5_NON_FP_BFREGS_PER_UAR
* MLX5_BFREGS_PER_UAR
+
753 bfregn
% MLX5_NON_FP_BFREGS_PER_UAR
;
756 static int create_user_qp(struct mlx5_ib_dev
*dev
, struct ib_pd
*pd
,
757 struct mlx5_ib_qp
*qp
, struct ib_udata
*udata
,
758 struct ib_qp_init_attr
*attr
,
760 struct mlx5_ib_create_qp_resp
*resp
, int *inlen
,
761 struct mlx5_ib_qp_base
*base
)
763 struct mlx5_ib_ucontext
*context
;
764 struct mlx5_ib_create_qp ucmd
;
765 struct mlx5_ib_ubuffer
*ubuffer
= &base
->ubuffer
;
776 err
= ib_copy_from_udata(&ucmd
, udata
, sizeof(ucmd
));
778 mlx5_ib_dbg(dev
, "copy failed\n");
782 context
= to_mucontext(pd
->uobject
->context
);
784 * TBD: should come from the verbs when we have the API
786 if (qp
->flags
& MLX5_IB_QP_CROSS_CHANNEL
)
787 /* In CROSS_CHANNEL CQ and QP must use the same UAR */
788 bfregn
= MLX5_CROSS_CHANNEL_BFREG
;
790 bfregn
= alloc_bfreg(dev
, &context
->bfregi
, MLX5_IB_LATENCY_CLASS_HIGH
);
792 mlx5_ib_dbg(dev
, "failed to allocate low latency BFREG\n");
793 mlx5_ib_dbg(dev
, "reverting to medium latency\n");
794 bfregn
= alloc_bfreg(dev
, &context
->bfregi
, MLX5_IB_LATENCY_CLASS_MEDIUM
);
796 mlx5_ib_dbg(dev
, "failed to allocate medium latency BFREG\n");
797 mlx5_ib_dbg(dev
, "reverting to high latency\n");
798 bfregn
= alloc_bfreg(dev
, &context
->bfregi
, MLX5_IB_LATENCY_CLASS_LOW
);
800 mlx5_ib_warn(dev
, "bfreg allocation failed\n");
807 uar_index
= bfregn_to_uar_index(dev
, &context
->bfregi
, bfregn
);
808 mlx5_ib_dbg(dev
, "bfregn 0x%x, uar_index 0x%x\n", bfregn
, uar_index
);
811 qp
->sq
.wqe_shift
= ilog2(MLX5_SEND_WQE_BB
);
812 qp
->sq
.offset
= qp
->rq
.wqe_cnt
<< qp
->rq
.wqe_shift
;
814 err
= set_user_buf_size(dev
, qp
, &ucmd
, base
, attr
);
818 if (ucmd
.buf_addr
&& ubuffer
->buf_size
) {
819 ubuffer
->buf_addr
= ucmd
.buf_addr
;
820 err
= mlx5_ib_umem_get(dev
, pd
, ubuffer
->buf_addr
,
822 &ubuffer
->umem
, &npages
, &page_shift
,
827 ubuffer
->umem
= NULL
;
830 *inlen
= MLX5_ST_SZ_BYTES(create_qp_in
) +
831 MLX5_FLD_SZ_BYTES(create_qp_in
, pas
[0]) * ncont
;
832 *in
= kvzalloc(*inlen
, GFP_KERNEL
);
838 pas
= (__be64
*)MLX5_ADDR_OF(create_qp_in
, *in
, pas
);
840 mlx5_ib_populate_pas(dev
, ubuffer
->umem
, page_shift
, pas
, 0);
842 qpc
= MLX5_ADDR_OF(create_qp_in
, *in
, qpc
);
844 MLX5_SET(qpc
, qpc
, log_page_size
, page_shift
- MLX5_ADAPTER_PAGE_SHIFT
);
845 MLX5_SET(qpc
, qpc
, page_offset
, offset
);
847 MLX5_SET(qpc
, qpc
, uar_page
, uar_index
);
848 resp
->bfreg_index
= adjust_bfregn(dev
, &context
->bfregi
, bfregn
);
851 err
= mlx5_ib_db_map_user(context
, ucmd
.db_addr
, &qp
->db
);
853 mlx5_ib_dbg(dev
, "map failed\n");
857 err
= ib_copy_to_udata(udata
, resp
, sizeof(*resp
));
859 mlx5_ib_dbg(dev
, "copy failed\n");
862 qp
->create_type
= MLX5_QP_USER
;
867 mlx5_ib_db_unmap_user(context
, &qp
->db
);
874 ib_umem_release(ubuffer
->umem
);
877 free_bfreg(dev
, &context
->bfregi
, bfregn
);
881 static void destroy_qp_user(struct mlx5_ib_dev
*dev
, struct ib_pd
*pd
,
882 struct mlx5_ib_qp
*qp
, struct mlx5_ib_qp_base
*base
)
884 struct mlx5_ib_ucontext
*context
;
886 context
= to_mucontext(pd
->uobject
->context
);
887 mlx5_ib_db_unmap_user(context
, &qp
->db
);
888 if (base
->ubuffer
.umem
)
889 ib_umem_release(base
->ubuffer
.umem
);
890 free_bfreg(dev
, &context
->bfregi
, qp
->bfregn
);
893 static int create_kernel_qp(struct mlx5_ib_dev
*dev
,
894 struct ib_qp_init_attr
*init_attr
,
895 struct mlx5_ib_qp
*qp
,
896 u32
**in
, int *inlen
,
897 struct mlx5_ib_qp_base
*base
)
903 if (init_attr
->create_flags
& ~(IB_QP_CREATE_SIGNATURE_EN
|
904 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK
|
905 IB_QP_CREATE_IPOIB_UD_LSO
|
906 IB_QP_CREATE_NETIF_QP
|
907 mlx5_ib_create_qp_sqpn_qp1()))
910 if (init_attr
->qp_type
== MLX5_IB_QPT_REG_UMR
)
911 qp
->bf
.bfreg
= &dev
->fp_bfreg
;
913 qp
->bf
.bfreg
= &dev
->bfreg
;
915 /* We need to divide by two since each register is comprised of
916 * two buffers of identical size, namely odd and even
918 qp
->bf
.buf_size
= (1 << MLX5_CAP_GEN(dev
->mdev
, log_bf_reg_size
)) / 2;
919 uar_index
= qp
->bf
.bfreg
->index
;
921 err
= calc_sq_size(dev
, init_attr
, qp
);
923 mlx5_ib_dbg(dev
, "err %d\n", err
);
928 qp
->sq
.offset
= qp
->rq
.wqe_cnt
<< qp
->rq
.wqe_shift
;
929 base
->ubuffer
.buf_size
= err
+ (qp
->rq
.wqe_cnt
<< qp
->rq
.wqe_shift
);
931 err
= mlx5_buf_alloc(dev
->mdev
, base
->ubuffer
.buf_size
, &qp
->buf
);
933 mlx5_ib_dbg(dev
, "err %d\n", err
);
937 qp
->sq
.qend
= mlx5_get_send_wqe(qp
, qp
->sq
.wqe_cnt
);
938 *inlen
= MLX5_ST_SZ_BYTES(create_qp_in
) +
939 MLX5_FLD_SZ_BYTES(create_qp_in
, pas
[0]) * qp
->buf
.npages
;
940 *in
= kvzalloc(*inlen
, GFP_KERNEL
);
946 qpc
= MLX5_ADDR_OF(create_qp_in
, *in
, qpc
);
947 MLX5_SET(qpc
, qpc
, uar_page
, uar_index
);
948 MLX5_SET(qpc
, qpc
, log_page_size
, qp
->buf
.page_shift
- MLX5_ADAPTER_PAGE_SHIFT
);
950 /* Set "fast registration enabled" for all kernel QPs */
951 MLX5_SET(qpc
, qpc
, fre
, 1);
952 MLX5_SET(qpc
, qpc
, rlky
, 1);
954 if (init_attr
->create_flags
& mlx5_ib_create_qp_sqpn_qp1()) {
955 MLX5_SET(qpc
, qpc
, deth_sqpn
, 1);
956 qp
->flags
|= MLX5_IB_QP_SQPN_QP1
;
959 mlx5_fill_page_array(&qp
->buf
,
960 (__be64
*)MLX5_ADDR_OF(create_qp_in
, *in
, pas
));
962 err
= mlx5_db_alloc(dev
->mdev
, &qp
->db
);
964 mlx5_ib_dbg(dev
, "err %d\n", err
);
968 qp
->sq
.wrid
= kvmalloc_array(qp
->sq
.wqe_cnt
,
969 sizeof(*qp
->sq
.wrid
), GFP_KERNEL
);
970 qp
->sq
.wr_data
= kvmalloc_array(qp
->sq
.wqe_cnt
,
971 sizeof(*qp
->sq
.wr_data
), GFP_KERNEL
);
972 qp
->rq
.wrid
= kvmalloc_array(qp
->rq
.wqe_cnt
,
973 sizeof(*qp
->rq
.wrid
), GFP_KERNEL
);
974 qp
->sq
.w_list
= kvmalloc_array(qp
->sq
.wqe_cnt
,
975 sizeof(*qp
->sq
.w_list
), GFP_KERNEL
);
976 qp
->sq
.wqe_head
= kvmalloc_array(qp
->sq
.wqe_cnt
,
977 sizeof(*qp
->sq
.wqe_head
), GFP_KERNEL
);
979 if (!qp
->sq
.wrid
|| !qp
->sq
.wr_data
|| !qp
->rq
.wrid
||
980 !qp
->sq
.w_list
|| !qp
->sq
.wqe_head
) {
984 qp
->create_type
= MLX5_QP_KERNEL
;
989 kvfree(qp
->sq
.wqe_head
);
990 kvfree(qp
->sq
.w_list
);
992 kvfree(qp
->sq
.wr_data
);
994 mlx5_db_free(dev
->mdev
, &qp
->db
);
1000 mlx5_buf_free(dev
->mdev
, &qp
->buf
);
1004 static void destroy_qp_kernel(struct mlx5_ib_dev
*dev
, struct mlx5_ib_qp
*qp
)
1006 kvfree(qp
->sq
.wqe_head
);
1007 kvfree(qp
->sq
.w_list
);
1008 kvfree(qp
->sq
.wrid
);
1009 kvfree(qp
->sq
.wr_data
);
1010 kvfree(qp
->rq
.wrid
);
1011 mlx5_db_free(dev
->mdev
, &qp
->db
);
1012 mlx5_buf_free(dev
->mdev
, &qp
->buf
);
1015 static u32
get_rx_type(struct mlx5_ib_qp
*qp
, struct ib_qp_init_attr
*attr
)
1017 if (attr
->srq
|| (attr
->qp_type
== IB_QPT_XRC_TGT
) ||
1018 (attr
->qp_type
== IB_QPT_XRC_INI
))
1020 else if (!qp
->has_rq
)
1021 return MLX5_ZERO_LEN_RQ
;
1023 return MLX5_NON_ZERO_RQ
;
1026 static int is_connected(enum ib_qp_type qp_type
)
1028 if (qp_type
== IB_QPT_RC
|| qp_type
== IB_QPT_UC
)
1034 static int create_raw_packet_qp_tis(struct mlx5_ib_dev
*dev
,
1035 struct mlx5_ib_qp
*qp
,
1036 struct mlx5_ib_sq
*sq
, u32 tdn
)
1038 u32 in
[MLX5_ST_SZ_DW(create_tis_in
)] = {0};
1039 void *tisc
= MLX5_ADDR_OF(create_tis_in
, in
, ctx
);
1041 MLX5_SET(tisc
, tisc
, transport_domain
, tdn
);
1042 if (qp
->flags
& MLX5_IB_QP_UNDERLAY
)
1043 MLX5_SET(tisc
, tisc
, underlay_qpn
, qp
->underlay_qpn
);
1045 return mlx5_core_create_tis(dev
->mdev
, in
, sizeof(in
), &sq
->tisn
);
1048 static void destroy_raw_packet_qp_tis(struct mlx5_ib_dev
*dev
,
1049 struct mlx5_ib_sq
*sq
)
1051 mlx5_core_destroy_tis(dev
->mdev
, sq
->tisn
);
1054 static int create_raw_packet_qp_sq(struct mlx5_ib_dev
*dev
,
1055 struct mlx5_ib_sq
*sq
, void *qpin
,
1058 struct mlx5_ib_ubuffer
*ubuffer
= &sq
->ubuffer
;
1062 void *qpc
= MLX5_ADDR_OF(create_qp_in
, qpin
, qpc
);
1071 err
= mlx5_ib_umem_get(dev
, pd
, ubuffer
->buf_addr
, ubuffer
->buf_size
,
1072 &sq
->ubuffer
.umem
, &npages
, &page_shift
,
1077 inlen
= MLX5_ST_SZ_BYTES(create_sq_in
) + sizeof(u64
) * ncont
;
1078 in
= kvzalloc(inlen
, GFP_KERNEL
);
1084 sqc
= MLX5_ADDR_OF(create_sq_in
, in
, ctx
);
1085 MLX5_SET(sqc
, sqc
, flush_in_error_en
, 1);
1086 if (MLX5_CAP_ETH(dev
->mdev
, multi_pkt_send_wqe
))
1087 MLX5_SET(sqc
, sqc
, allow_multi_pkt_send_wqe
, 1);
1088 MLX5_SET(sqc
, sqc
, state
, MLX5_SQC_STATE_RST
);
1089 MLX5_SET(sqc
, sqc
, user_index
, MLX5_GET(qpc
, qpc
, user_index
));
1090 MLX5_SET(sqc
, sqc
, cqn
, MLX5_GET(qpc
, qpc
, cqn_snd
));
1091 MLX5_SET(sqc
, sqc
, tis_lst_sz
, 1);
1092 MLX5_SET(sqc
, sqc
, tis_num_0
, sq
->tisn
);
1093 if (MLX5_CAP_GEN(dev
->mdev
, eth_net_offloads
) &&
1094 MLX5_CAP_ETH(dev
->mdev
, swp
))
1095 MLX5_SET(sqc
, sqc
, allow_swp
, 1);
1097 wq
= MLX5_ADDR_OF(sqc
, sqc
, wq
);
1098 MLX5_SET(wq
, wq
, wq_type
, MLX5_WQ_TYPE_CYCLIC
);
1099 MLX5_SET(wq
, wq
, pd
, MLX5_GET(qpc
, qpc
, pd
));
1100 MLX5_SET(wq
, wq
, uar_page
, MLX5_GET(qpc
, qpc
, uar_page
));
1101 MLX5_SET64(wq
, wq
, dbr_addr
, MLX5_GET64(qpc
, qpc
, dbr_addr
));
1102 MLX5_SET(wq
, wq
, log_wq_stride
, ilog2(MLX5_SEND_WQE_BB
));
1103 MLX5_SET(wq
, wq
, log_wq_sz
, MLX5_GET(qpc
, qpc
, log_sq_size
));
1104 MLX5_SET(wq
, wq
, log_wq_pg_sz
, page_shift
- MLX5_ADAPTER_PAGE_SHIFT
);
1105 MLX5_SET(wq
, wq
, page_offset
, offset
);
1107 pas
= (__be64
*)MLX5_ADDR_OF(wq
, wq
, pas
);
1108 mlx5_ib_populate_pas(dev
, sq
->ubuffer
.umem
, page_shift
, pas
, 0);
1110 err
= mlx5_core_create_sq_tracked(dev
->mdev
, in
, inlen
, &sq
->base
.mqp
);
1120 ib_umem_release(sq
->ubuffer
.umem
);
1121 sq
->ubuffer
.umem
= NULL
;
1126 static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev
*dev
,
1127 struct mlx5_ib_sq
*sq
)
1129 mlx5_core_destroy_sq_tracked(dev
->mdev
, &sq
->base
.mqp
);
1130 ib_umem_release(sq
->ubuffer
.umem
);
1133 static int get_rq_pas_size(void *qpc
)
1135 u32 log_page_size
= MLX5_GET(qpc
, qpc
, log_page_size
) + 12;
1136 u32 log_rq_stride
= MLX5_GET(qpc
, qpc
, log_rq_stride
);
1137 u32 log_rq_size
= MLX5_GET(qpc
, qpc
, log_rq_size
);
1138 u32 page_offset
= MLX5_GET(qpc
, qpc
, page_offset
);
1139 u32 po_quanta
= 1 << (log_page_size
- 6);
1140 u32 rq_sz
= 1 << (log_rq_size
+ 4 + log_rq_stride
);
1141 u32 page_size
= 1 << log_page_size
;
1142 u32 rq_sz_po
= rq_sz
+ (page_offset
* po_quanta
);
1143 u32 rq_num_pas
= (rq_sz_po
+ page_size
- 1) / page_size
;
1145 return rq_num_pas
* sizeof(u64
);
1148 static int create_raw_packet_qp_rq(struct mlx5_ib_dev
*dev
,
1149 struct mlx5_ib_rq
*rq
, void *qpin
)
1151 struct mlx5_ib_qp
*mqp
= rq
->base
.container_mibqp
;
1157 void *qpc
= MLX5_ADDR_OF(create_qp_in
, qpin
, qpc
);
1160 u32 rq_pas_size
= get_rq_pas_size(qpc
);
1162 inlen
= MLX5_ST_SZ_BYTES(create_rq_in
) + rq_pas_size
;
1163 in
= kvzalloc(inlen
, GFP_KERNEL
);
1167 rqc
= MLX5_ADDR_OF(create_rq_in
, in
, ctx
);
1168 if (!(rq
->flags
& MLX5_IB_RQ_CVLAN_STRIPPING
))
1169 MLX5_SET(rqc
, rqc
, vsd
, 1);
1170 MLX5_SET(rqc
, rqc
, mem_rq_type
, MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE
);
1171 MLX5_SET(rqc
, rqc
, state
, MLX5_RQC_STATE_RST
);
1172 MLX5_SET(rqc
, rqc
, flush_in_error_en
, 1);
1173 MLX5_SET(rqc
, rqc
, user_index
, MLX5_GET(qpc
, qpc
, user_index
));
1174 MLX5_SET(rqc
, rqc
, cqn
, MLX5_GET(qpc
, qpc
, cqn_rcv
));
1176 if (mqp
->flags
& MLX5_IB_QP_CAP_SCATTER_FCS
)
1177 MLX5_SET(rqc
, rqc
, scatter_fcs
, 1);
1179 wq
= MLX5_ADDR_OF(rqc
, rqc
, wq
);
1180 MLX5_SET(wq
, wq
, wq_type
, MLX5_WQ_TYPE_CYCLIC
);
1181 if (rq
->flags
& MLX5_IB_RQ_PCI_WRITE_END_PADDING
)
1182 MLX5_SET(wq
, wq
, end_padding_mode
, MLX5_WQ_END_PAD_MODE_ALIGN
);
1183 MLX5_SET(wq
, wq
, page_offset
, MLX5_GET(qpc
, qpc
, page_offset
));
1184 MLX5_SET(wq
, wq
, pd
, MLX5_GET(qpc
, qpc
, pd
));
1185 MLX5_SET64(wq
, wq
, dbr_addr
, MLX5_GET64(qpc
, qpc
, dbr_addr
));
1186 MLX5_SET(wq
, wq
, log_wq_stride
, MLX5_GET(qpc
, qpc
, log_rq_stride
) + 4);
1187 MLX5_SET(wq
, wq
, log_wq_pg_sz
, MLX5_GET(qpc
, qpc
, log_page_size
));
1188 MLX5_SET(wq
, wq
, log_wq_sz
, MLX5_GET(qpc
, qpc
, log_rq_size
));
1190 pas
= (__be64
*)MLX5_ADDR_OF(wq
, wq
, pas
);
1191 qp_pas
= (__be64
*)MLX5_ADDR_OF(create_qp_in
, qpin
, pas
);
1192 memcpy(pas
, qp_pas
, rq_pas_size
);
1194 err
= mlx5_core_create_rq_tracked(dev
->mdev
, in
, inlen
, &rq
->base
.mqp
);
1201 static void destroy_raw_packet_qp_rq(struct mlx5_ib_dev
*dev
,
1202 struct mlx5_ib_rq
*rq
)
1204 mlx5_core_destroy_rq_tracked(dev
->mdev
, &rq
->base
.mqp
);
1207 static bool tunnel_offload_supported(struct mlx5_core_dev
*dev
)
1209 return (MLX5_CAP_ETH(dev
, tunnel_stateless_vxlan
) ||
1210 MLX5_CAP_ETH(dev
, tunnel_stateless_gre
) ||
1211 MLX5_CAP_ETH(dev
, tunnel_stateless_geneve_rx
));
1214 static int create_raw_packet_qp_tir(struct mlx5_ib_dev
*dev
,
1215 struct mlx5_ib_rq
*rq
, u32 tdn
,
1216 bool tunnel_offload_en
)
1223 inlen
= MLX5_ST_SZ_BYTES(create_tir_in
);
1224 in
= kvzalloc(inlen
, GFP_KERNEL
);
1228 tirc
= MLX5_ADDR_OF(create_tir_in
, in
, ctx
);
1229 MLX5_SET(tirc
, tirc
, disp_type
, MLX5_TIRC_DISP_TYPE_DIRECT
);
1230 MLX5_SET(tirc
, tirc
, inline_rqn
, rq
->base
.mqp
.qpn
);
1231 MLX5_SET(tirc
, tirc
, transport_domain
, tdn
);
1232 if (tunnel_offload_en
)
1233 MLX5_SET(tirc
, tirc
, tunneled_offload_en
, 1);
1235 err
= mlx5_core_create_tir(dev
->mdev
, in
, inlen
, &rq
->tirn
);
1242 static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev
*dev
,
1243 struct mlx5_ib_rq
*rq
)
1245 mlx5_core_destroy_tir(dev
->mdev
, rq
->tirn
);
1248 static int create_raw_packet_qp(struct mlx5_ib_dev
*dev
, struct mlx5_ib_qp
*qp
,
1252 struct mlx5_ib_raw_packet_qp
*raw_packet_qp
= &qp
->raw_packet_qp
;
1253 struct mlx5_ib_sq
*sq
= &raw_packet_qp
->sq
;
1254 struct mlx5_ib_rq
*rq
= &raw_packet_qp
->rq
;
1255 struct ib_uobject
*uobj
= pd
->uobject
;
1256 struct ib_ucontext
*ucontext
= uobj
->context
;
1257 struct mlx5_ib_ucontext
*mucontext
= to_mucontext(ucontext
);
1259 u32 tdn
= mucontext
->tdn
;
1261 if (qp
->sq
.wqe_cnt
) {
1262 err
= create_raw_packet_qp_tis(dev
, qp
, sq
, tdn
);
1266 err
= create_raw_packet_qp_sq(dev
, sq
, in
, pd
);
1268 goto err_destroy_tis
;
1270 sq
->base
.container_mibqp
= qp
;
1271 sq
->base
.mqp
.event
= mlx5_ib_qp_event
;
1274 if (qp
->rq
.wqe_cnt
) {
1275 rq
->base
.container_mibqp
= qp
;
1277 if (qp
->flags
& MLX5_IB_QP_CVLAN_STRIPPING
)
1278 rq
->flags
|= MLX5_IB_RQ_CVLAN_STRIPPING
;
1279 if (qp
->flags
& MLX5_IB_QP_PCI_WRITE_END_PADDING
)
1280 rq
->flags
|= MLX5_IB_RQ_PCI_WRITE_END_PADDING
;
1281 err
= create_raw_packet_qp_rq(dev
, rq
, in
);
1283 goto err_destroy_sq
;
1286 err
= create_raw_packet_qp_tir(dev
, rq
, tdn
,
1287 qp
->tunnel_offload_en
);
1289 goto err_destroy_rq
;
1292 qp
->trans_qp
.base
.mqp
.qpn
= qp
->sq
.wqe_cnt
? sq
->base
.mqp
.qpn
:
1298 destroy_raw_packet_qp_rq(dev
, rq
);
1300 if (!qp
->sq
.wqe_cnt
)
1302 destroy_raw_packet_qp_sq(dev
, sq
);
1304 destroy_raw_packet_qp_tis(dev
, sq
);
1309 static void destroy_raw_packet_qp(struct mlx5_ib_dev
*dev
,
1310 struct mlx5_ib_qp
*qp
)
1312 struct mlx5_ib_raw_packet_qp
*raw_packet_qp
= &qp
->raw_packet_qp
;
1313 struct mlx5_ib_sq
*sq
= &raw_packet_qp
->sq
;
1314 struct mlx5_ib_rq
*rq
= &raw_packet_qp
->rq
;
1316 if (qp
->rq
.wqe_cnt
) {
1317 destroy_raw_packet_qp_tir(dev
, rq
);
1318 destroy_raw_packet_qp_rq(dev
, rq
);
1321 if (qp
->sq
.wqe_cnt
) {
1322 destroy_raw_packet_qp_sq(dev
, sq
);
1323 destroy_raw_packet_qp_tis(dev
, sq
);
1327 static void raw_packet_qp_copy_info(struct mlx5_ib_qp
*qp
,
1328 struct mlx5_ib_raw_packet_qp
*raw_packet_qp
)
1330 struct mlx5_ib_sq
*sq
= &raw_packet_qp
->sq
;
1331 struct mlx5_ib_rq
*rq
= &raw_packet_qp
->rq
;
1335 sq
->doorbell
= &qp
->db
;
1336 rq
->doorbell
= &qp
->db
;
1339 static void destroy_rss_raw_qp_tir(struct mlx5_ib_dev
*dev
, struct mlx5_ib_qp
*qp
)
1341 mlx5_core_destroy_tir(dev
->mdev
, qp
->rss_qp
.tirn
);
1344 static int create_rss_raw_qp_tir(struct mlx5_ib_dev
*dev
, struct mlx5_ib_qp
*qp
,
1346 struct ib_qp_init_attr
*init_attr
,
1347 struct ib_udata
*udata
)
1349 struct ib_uobject
*uobj
= pd
->uobject
;
1350 struct ib_ucontext
*ucontext
= uobj
->context
;
1351 struct mlx5_ib_ucontext
*mucontext
= to_mucontext(ucontext
);
1352 struct mlx5_ib_create_qp_resp resp
= {};
1358 u32 selected_fields
= 0;
1359 size_t min_resp_len
;
1360 u32 tdn
= mucontext
->tdn
;
1361 struct mlx5_ib_create_qp_rss ucmd
= {};
1362 size_t required_cmd_sz
;
1364 if (init_attr
->qp_type
!= IB_QPT_RAW_PACKET
)
1367 if (init_attr
->create_flags
|| init_attr
->send_cq
)
1370 min_resp_len
= offsetof(typeof(resp
), bfreg_index
) + sizeof(resp
.bfreg_index
);
1371 if (udata
->outlen
< min_resp_len
)
1374 required_cmd_sz
= offsetof(typeof(ucmd
), flags
) + sizeof(ucmd
.flags
);
1375 if (udata
->inlen
< required_cmd_sz
) {
1376 mlx5_ib_dbg(dev
, "invalid inlen\n");
1380 if (udata
->inlen
> sizeof(ucmd
) &&
1381 !ib_is_udata_cleared(udata
, sizeof(ucmd
),
1382 udata
->inlen
- sizeof(ucmd
))) {
1383 mlx5_ib_dbg(dev
, "inlen is not supported\n");
1387 if (ib_copy_from_udata(&ucmd
, udata
, min(sizeof(ucmd
), udata
->inlen
))) {
1388 mlx5_ib_dbg(dev
, "copy failed\n");
1392 if (ucmd
.comp_mask
) {
1393 mlx5_ib_dbg(dev
, "invalid comp mask\n");
1397 if (ucmd
.flags
& ~MLX5_QP_FLAG_TUNNEL_OFFLOADS
) {
1398 mlx5_ib_dbg(dev
, "invalid flags\n");
1402 if (ucmd
.flags
& MLX5_QP_FLAG_TUNNEL_OFFLOADS
&&
1403 !tunnel_offload_supported(dev
->mdev
)) {
1404 mlx5_ib_dbg(dev
, "tunnel offloads isn't supported\n");
1408 if (ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_INNER
&&
1409 !(ucmd
.flags
& MLX5_QP_FLAG_TUNNEL_OFFLOADS
)) {
1410 mlx5_ib_dbg(dev
, "Tunnel offloads must be set for inner RSS\n");
1414 err
= ib_copy_to_udata(udata
, &resp
, min_resp_len
);
1416 mlx5_ib_dbg(dev
, "copy failed\n");
1420 inlen
= MLX5_ST_SZ_BYTES(create_tir_in
);
1421 in
= kvzalloc(inlen
, GFP_KERNEL
);
1425 tirc
= MLX5_ADDR_OF(create_tir_in
, in
, ctx
);
1426 MLX5_SET(tirc
, tirc
, disp_type
,
1427 MLX5_TIRC_DISP_TYPE_INDIRECT
);
1428 MLX5_SET(tirc
, tirc
, indirect_table
,
1429 init_attr
->rwq_ind_tbl
->ind_tbl_num
);
1430 MLX5_SET(tirc
, tirc
, transport_domain
, tdn
);
1432 hfso
= MLX5_ADDR_OF(tirc
, tirc
, rx_hash_field_selector_outer
);
1434 if (ucmd
.flags
& MLX5_QP_FLAG_TUNNEL_OFFLOADS
)
1435 MLX5_SET(tirc
, tirc
, tunneled_offload_en
, 1);
1437 if (ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_INNER
)
1438 hfso
= MLX5_ADDR_OF(tirc
, tirc
, rx_hash_field_selector_inner
);
1440 hfso
= MLX5_ADDR_OF(tirc
, tirc
, rx_hash_field_selector_outer
);
1442 switch (ucmd
.rx_hash_function
) {
1443 case MLX5_RX_HASH_FUNC_TOEPLITZ
:
1445 void *rss_key
= MLX5_ADDR_OF(tirc
, tirc
, rx_hash_toeplitz_key
);
1446 size_t len
= MLX5_FLD_SZ_BYTES(tirc
, rx_hash_toeplitz_key
);
1448 if (len
!= ucmd
.rx_key_len
) {
1453 MLX5_SET(tirc
, tirc
, rx_hash_fn
, MLX5_RX_HASH_FN_TOEPLITZ
);
1454 MLX5_SET(tirc
, tirc
, rx_hash_symmetric
, 1);
1455 memcpy(rss_key
, ucmd
.rx_hash_key
, len
);
1463 if (!ucmd
.rx_hash_fields_mask
) {
1464 /* special case when this TIR serves as steering entry without hashing */
1465 if (!init_attr
->rwq_ind_tbl
->log_ind_tbl_size
)
1471 if (((ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_SRC_IPV4
) ||
1472 (ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_DST_IPV4
)) &&
1473 ((ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_SRC_IPV6
) ||
1474 (ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_DST_IPV6
))) {
1479 /* If none of IPV4 & IPV6 SRC/DST was set - this bit field is ignored */
1480 if ((ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_SRC_IPV4
) ||
1481 (ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_DST_IPV4
))
1482 MLX5_SET(rx_hash_field_select
, hfso
, l3_prot_type
,
1483 MLX5_L3_PROT_TYPE_IPV4
);
1484 else if ((ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_SRC_IPV6
) ||
1485 (ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_DST_IPV6
))
1486 MLX5_SET(rx_hash_field_select
, hfso
, l3_prot_type
,
1487 MLX5_L3_PROT_TYPE_IPV6
);
1489 if (((ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_SRC_PORT_TCP
) ||
1490 (ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_DST_PORT_TCP
)) &&
1491 ((ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_SRC_PORT_UDP
) ||
1492 (ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_DST_PORT_UDP
))) {
1497 /* If none of TCP & UDP SRC/DST was set - this bit field is ignored */
1498 if ((ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_SRC_PORT_TCP
) ||
1499 (ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_DST_PORT_TCP
))
1500 MLX5_SET(rx_hash_field_select
, hfso
, l4_prot_type
,
1501 MLX5_L4_PROT_TYPE_TCP
);
1502 else if ((ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_SRC_PORT_UDP
) ||
1503 (ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_DST_PORT_UDP
))
1504 MLX5_SET(rx_hash_field_select
, hfso
, l4_prot_type
,
1505 MLX5_L4_PROT_TYPE_UDP
);
1507 if ((ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_SRC_IPV4
) ||
1508 (ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_SRC_IPV6
))
1509 selected_fields
|= MLX5_HASH_FIELD_SEL_SRC_IP
;
1511 if ((ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_DST_IPV4
) ||
1512 (ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_DST_IPV6
))
1513 selected_fields
|= MLX5_HASH_FIELD_SEL_DST_IP
;
1515 if ((ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_SRC_PORT_TCP
) ||
1516 (ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_SRC_PORT_UDP
))
1517 selected_fields
|= MLX5_HASH_FIELD_SEL_L4_SPORT
;
1519 if ((ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_DST_PORT_TCP
) ||
1520 (ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_DST_PORT_UDP
))
1521 selected_fields
|= MLX5_HASH_FIELD_SEL_L4_DPORT
;
1523 MLX5_SET(rx_hash_field_select
, hfso
, selected_fields
, selected_fields
);
1526 err
= mlx5_core_create_tir(dev
->mdev
, in
, inlen
, &qp
->rss_qp
.tirn
);
1532 /* qpn is reserved for that QP */
1533 qp
->trans_qp
.base
.mqp
.qpn
= 0;
1534 qp
->flags
|= MLX5_IB_QP_RSS
;
1542 static int create_qp_common(struct mlx5_ib_dev
*dev
, struct ib_pd
*pd
,
1543 struct ib_qp_init_attr
*init_attr
,
1544 struct ib_udata
*udata
, struct mlx5_ib_qp
*qp
)
1546 struct mlx5_ib_resources
*devr
= &dev
->devr
;
1547 int inlen
= MLX5_ST_SZ_BYTES(create_qp_in
);
1548 struct mlx5_core_dev
*mdev
= dev
->mdev
;
1549 struct mlx5_ib_create_qp_resp resp
;
1550 struct mlx5_ib_cq
*send_cq
;
1551 struct mlx5_ib_cq
*recv_cq
;
1552 unsigned long flags
;
1553 u32 uidx
= MLX5_IB_DEFAULT_UIDX
;
1554 struct mlx5_ib_create_qp ucmd
;
1555 struct mlx5_ib_qp_base
*base
;
1560 mutex_init(&qp
->mutex
);
1561 spin_lock_init(&qp
->sq
.lock
);
1562 spin_lock_init(&qp
->rq
.lock
);
1564 if (init_attr
->rwq_ind_tbl
) {
1568 err
= create_rss_raw_qp_tir(dev
, qp
, pd
, init_attr
, udata
);
1572 if (init_attr
->create_flags
& IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK
) {
1573 if (!MLX5_CAP_GEN(mdev
, block_lb_mc
)) {
1574 mlx5_ib_dbg(dev
, "block multicast loopback isn't supported\n");
1577 qp
->flags
|= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK
;
1581 if (init_attr
->create_flags
&
1582 (IB_QP_CREATE_CROSS_CHANNEL
|
1583 IB_QP_CREATE_MANAGED_SEND
|
1584 IB_QP_CREATE_MANAGED_RECV
)) {
1585 if (!MLX5_CAP_GEN(mdev
, cd
)) {
1586 mlx5_ib_dbg(dev
, "cross-channel isn't supported\n");
1589 if (init_attr
->create_flags
& IB_QP_CREATE_CROSS_CHANNEL
)
1590 qp
->flags
|= MLX5_IB_QP_CROSS_CHANNEL
;
1591 if (init_attr
->create_flags
& IB_QP_CREATE_MANAGED_SEND
)
1592 qp
->flags
|= MLX5_IB_QP_MANAGED_SEND
;
1593 if (init_attr
->create_flags
& IB_QP_CREATE_MANAGED_RECV
)
1594 qp
->flags
|= MLX5_IB_QP_MANAGED_RECV
;
1597 if (init_attr
->qp_type
== IB_QPT_UD
&&
1598 (init_attr
->create_flags
& IB_QP_CREATE_IPOIB_UD_LSO
))
1599 if (!MLX5_CAP_GEN(mdev
, ipoib_basic_offloads
)) {
1600 mlx5_ib_dbg(dev
, "ipoib UD lso qp isn't supported\n");
1604 if (init_attr
->create_flags
& IB_QP_CREATE_SCATTER_FCS
) {
1605 if (init_attr
->qp_type
!= IB_QPT_RAW_PACKET
) {
1606 mlx5_ib_dbg(dev
, "Scatter FCS is supported only for Raw Packet QPs");
1609 if (!MLX5_CAP_GEN(dev
->mdev
, eth_net_offloads
) ||
1610 !MLX5_CAP_ETH(dev
->mdev
, scatter_fcs
)) {
1611 mlx5_ib_dbg(dev
, "Scatter FCS isn't supported\n");
1614 qp
->flags
|= MLX5_IB_QP_CAP_SCATTER_FCS
;
1617 if (init_attr
->sq_sig_type
== IB_SIGNAL_ALL_WR
)
1618 qp
->sq_signal_bits
= MLX5_WQE_CTRL_CQ_UPDATE
;
1620 if (init_attr
->create_flags
& IB_QP_CREATE_CVLAN_STRIPPING
) {
1621 if (!(MLX5_CAP_GEN(dev
->mdev
, eth_net_offloads
) &&
1622 MLX5_CAP_ETH(dev
->mdev
, vlan_cap
)) ||
1623 (init_attr
->qp_type
!= IB_QPT_RAW_PACKET
))
1625 qp
->flags
|= MLX5_IB_QP_CVLAN_STRIPPING
;
1628 if (pd
&& pd
->uobject
) {
1629 if (ib_copy_from_udata(&ucmd
, udata
, sizeof(ucmd
))) {
1630 mlx5_ib_dbg(dev
, "copy failed\n");
1634 err
= get_qp_user_index(to_mucontext(pd
->uobject
->context
),
1635 &ucmd
, udata
->inlen
, &uidx
);
1639 qp
->wq_sig
= !!(ucmd
.flags
& MLX5_QP_FLAG_SIGNATURE
);
1640 qp
->scat_cqe
= !!(ucmd
.flags
& MLX5_QP_FLAG_SCATTER_CQE
);
1641 if (ucmd
.flags
& MLX5_QP_FLAG_TUNNEL_OFFLOADS
) {
1642 if (init_attr
->qp_type
!= IB_QPT_RAW_PACKET
||
1643 !tunnel_offload_supported(mdev
)) {
1644 mlx5_ib_dbg(dev
, "Tunnel offload isn't supported\n");
1647 qp
->tunnel_offload_en
= true;
1650 if (init_attr
->create_flags
& IB_QP_CREATE_SOURCE_QPN
) {
1651 if (init_attr
->qp_type
!= IB_QPT_UD
||
1652 (MLX5_CAP_GEN(dev
->mdev
, port_type
) !=
1653 MLX5_CAP_PORT_TYPE_IB
) ||
1654 !mlx5_get_flow_namespace(dev
->mdev
, MLX5_FLOW_NAMESPACE_BYPASS
)) {
1655 mlx5_ib_dbg(dev
, "Source QP option isn't supported\n");
1659 qp
->flags
|= MLX5_IB_QP_UNDERLAY
;
1660 qp
->underlay_qpn
= init_attr
->source_qpn
;
1663 qp
->wq_sig
= !!wq_signature
;
1666 base
= (init_attr
->qp_type
== IB_QPT_RAW_PACKET
||
1667 qp
->flags
& MLX5_IB_QP_UNDERLAY
) ?
1668 &qp
->raw_packet_qp
.rq
.base
:
1671 qp
->has_rq
= qp_has_rq(init_attr
);
1672 err
= set_rq_size(dev
, &init_attr
->cap
, qp
->has_rq
,
1673 qp
, (pd
&& pd
->uobject
) ? &ucmd
: NULL
);
1675 mlx5_ib_dbg(dev
, "err %d\n", err
);
1682 1 << MLX5_CAP_GEN(mdev
, log_max_qp_sz
);
1683 mlx5_ib_dbg(dev
, "requested sq_wqe_count (%d)\n", ucmd
.sq_wqe_count
);
1684 if (ucmd
.rq_wqe_shift
!= qp
->rq
.wqe_shift
||
1685 ucmd
.rq_wqe_count
!= qp
->rq
.wqe_cnt
) {
1686 mlx5_ib_dbg(dev
, "invalid rq params\n");
1689 if (ucmd
.sq_wqe_count
> max_wqes
) {
1690 mlx5_ib_dbg(dev
, "requested sq_wqe_count (%d) > max allowed (%d)\n",
1691 ucmd
.sq_wqe_count
, max_wqes
);
1694 if (init_attr
->create_flags
&
1695 mlx5_ib_create_qp_sqpn_qp1()) {
1696 mlx5_ib_dbg(dev
, "user-space is not allowed to create UD QPs spoofing as QP1\n");
1699 err
= create_user_qp(dev
, pd
, qp
, udata
, init_attr
, &in
,
1700 &resp
, &inlen
, base
);
1702 mlx5_ib_dbg(dev
, "err %d\n", err
);
1704 err
= create_kernel_qp(dev
, init_attr
, qp
, &in
, &inlen
,
1707 mlx5_ib_dbg(dev
, "err %d\n", err
);
1713 in
= kvzalloc(inlen
, GFP_KERNEL
);
1717 qp
->create_type
= MLX5_QP_EMPTY
;
1720 if (is_sqp(init_attr
->qp_type
))
1721 qp
->port
= init_attr
->port_num
;
1723 qpc
= MLX5_ADDR_OF(create_qp_in
, in
, qpc
);
1725 MLX5_SET(qpc
, qpc
, st
, to_mlx5_st(init_attr
->qp_type
));
1726 MLX5_SET(qpc
, qpc
, pm_state
, MLX5_QP_PM_MIGRATED
);
1728 if (init_attr
->qp_type
!= MLX5_IB_QPT_REG_UMR
)
1729 MLX5_SET(qpc
, qpc
, pd
, to_mpd(pd
? pd
: devr
->p0
)->pdn
);
1731 MLX5_SET(qpc
, qpc
, latency_sensitive
, 1);
1735 MLX5_SET(qpc
, qpc
, wq_signature
, 1);
1737 if (qp
->flags
& MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK
)
1738 MLX5_SET(qpc
, qpc
, block_lb_mc
, 1);
1740 if (qp
->flags
& MLX5_IB_QP_CROSS_CHANNEL
)
1741 MLX5_SET(qpc
, qpc
, cd_master
, 1);
1742 if (qp
->flags
& MLX5_IB_QP_MANAGED_SEND
)
1743 MLX5_SET(qpc
, qpc
, cd_slave_send
, 1);
1744 if (qp
->flags
& MLX5_IB_QP_MANAGED_RECV
)
1745 MLX5_SET(qpc
, qpc
, cd_slave_receive
, 1);
1747 if (qp
->scat_cqe
&& is_connected(init_attr
->qp_type
)) {
1751 rcqe_sz
= mlx5_ib_get_cqe_size(dev
, init_attr
->recv_cq
);
1752 scqe_sz
= mlx5_ib_get_cqe_size(dev
, init_attr
->send_cq
);
1755 MLX5_SET(qpc
, qpc
, cs_res
, MLX5_RES_SCAT_DATA64_CQE
);
1757 MLX5_SET(qpc
, qpc
, cs_res
, MLX5_RES_SCAT_DATA32_CQE
);
1759 if (init_attr
->sq_sig_type
== IB_SIGNAL_ALL_WR
) {
1761 MLX5_SET(qpc
, qpc
, cs_req
, MLX5_REQ_SCAT_DATA64_CQE
);
1763 MLX5_SET(qpc
, qpc
, cs_req
, MLX5_REQ_SCAT_DATA32_CQE
);
1767 if (qp
->rq
.wqe_cnt
) {
1768 MLX5_SET(qpc
, qpc
, log_rq_stride
, qp
->rq
.wqe_shift
- 4);
1769 MLX5_SET(qpc
, qpc
, log_rq_size
, ilog2(qp
->rq
.wqe_cnt
));
1772 MLX5_SET(qpc
, qpc
, rq_type
, get_rx_type(qp
, init_attr
));
1774 if (qp
->sq
.wqe_cnt
) {
1775 MLX5_SET(qpc
, qpc
, log_sq_size
, ilog2(qp
->sq
.wqe_cnt
));
1777 MLX5_SET(qpc
, qpc
, no_sq
, 1);
1778 if (init_attr
->srq
&&
1779 init_attr
->srq
->srq_type
== IB_SRQT_TM
)
1780 MLX5_SET(qpc
, qpc
, offload_type
,
1781 MLX5_QPC_OFFLOAD_TYPE_RNDV
);
1784 /* Set default resources */
1785 switch (init_attr
->qp_type
) {
1786 case IB_QPT_XRC_TGT
:
1787 MLX5_SET(qpc
, qpc
, cqn_rcv
, to_mcq(devr
->c0
)->mcq
.cqn
);
1788 MLX5_SET(qpc
, qpc
, cqn_snd
, to_mcq(devr
->c0
)->mcq
.cqn
);
1789 MLX5_SET(qpc
, qpc
, srqn_rmpn_xrqn
, to_msrq(devr
->s0
)->msrq
.srqn
);
1790 MLX5_SET(qpc
, qpc
, xrcd
, to_mxrcd(init_attr
->xrcd
)->xrcdn
);
1792 case IB_QPT_XRC_INI
:
1793 MLX5_SET(qpc
, qpc
, cqn_rcv
, to_mcq(devr
->c0
)->mcq
.cqn
);
1794 MLX5_SET(qpc
, qpc
, xrcd
, to_mxrcd(devr
->x1
)->xrcdn
);
1795 MLX5_SET(qpc
, qpc
, srqn_rmpn_xrqn
, to_msrq(devr
->s0
)->msrq
.srqn
);
1798 if (init_attr
->srq
) {
1799 MLX5_SET(qpc
, qpc
, xrcd
, to_mxrcd(devr
->x0
)->xrcdn
);
1800 MLX5_SET(qpc
, qpc
, srqn_rmpn_xrqn
, to_msrq(init_attr
->srq
)->msrq
.srqn
);
1802 MLX5_SET(qpc
, qpc
, xrcd
, to_mxrcd(devr
->x1
)->xrcdn
);
1803 MLX5_SET(qpc
, qpc
, srqn_rmpn_xrqn
, to_msrq(devr
->s1
)->msrq
.srqn
);
1807 if (init_attr
->send_cq
)
1808 MLX5_SET(qpc
, qpc
, cqn_snd
, to_mcq(init_attr
->send_cq
)->mcq
.cqn
);
1810 if (init_attr
->recv_cq
)
1811 MLX5_SET(qpc
, qpc
, cqn_rcv
, to_mcq(init_attr
->recv_cq
)->mcq
.cqn
);
1813 MLX5_SET64(qpc
, qpc
, dbr_addr
, qp
->db
.dma
);
1815 /* 0xffffff means we ask to work with cqe version 0 */
1816 if (MLX5_CAP_GEN(mdev
, cqe_version
) == MLX5_CQE_VERSION_V1
)
1817 MLX5_SET(qpc
, qpc
, user_index
, uidx
);
1819 /* we use IB_QP_CREATE_IPOIB_UD_LSO to indicates ipoib qp */
1820 if (init_attr
->qp_type
== IB_QPT_UD
&&
1821 (init_attr
->create_flags
& IB_QP_CREATE_IPOIB_UD_LSO
)) {
1822 MLX5_SET(qpc
, qpc
, ulp_stateless_offload_mode
, 1);
1823 qp
->flags
|= MLX5_IB_QP_LSO
;
1826 if (init_attr
->create_flags
& IB_QP_CREATE_PCI_WRITE_END_PADDING
) {
1827 if (!MLX5_CAP_GEN(dev
->mdev
, end_pad
)) {
1828 mlx5_ib_dbg(dev
, "scatter end padding is not supported\n");
1831 } else if (init_attr
->qp_type
!= IB_QPT_RAW_PACKET
) {
1832 MLX5_SET(qpc
, qpc
, end_padding_mode
,
1833 MLX5_WQ_END_PAD_MODE_ALIGN
);
1835 qp
->flags
|= MLX5_IB_QP_PCI_WRITE_END_PADDING
;
1839 if (init_attr
->qp_type
== IB_QPT_RAW_PACKET
||
1840 qp
->flags
& MLX5_IB_QP_UNDERLAY
) {
1841 qp
->raw_packet_qp
.sq
.ubuffer
.buf_addr
= ucmd
.sq_buf_addr
;
1842 raw_packet_qp_copy_info(qp
, &qp
->raw_packet_qp
);
1843 err
= create_raw_packet_qp(dev
, qp
, in
, pd
);
1845 err
= mlx5_core_create_qp(dev
->mdev
, &base
->mqp
, in
, inlen
);
1849 mlx5_ib_dbg(dev
, "create qp failed\n");
1855 base
->container_mibqp
= qp
;
1856 base
->mqp
.event
= mlx5_ib_qp_event
;
1858 get_cqs(init_attr
->qp_type
, init_attr
->send_cq
, init_attr
->recv_cq
,
1859 &send_cq
, &recv_cq
);
1860 spin_lock_irqsave(&dev
->reset_flow_resource_lock
, flags
);
1861 mlx5_ib_lock_cqs(send_cq
, recv_cq
);
1862 /* Maintain device to QPs access, needed for further handling via reset
1865 list_add_tail(&qp
->qps_list
, &dev
->qp_list
);
1866 /* Maintain CQ to QPs access, needed for further handling via reset flow
1869 list_add_tail(&qp
->cq_send_list
, &send_cq
->list_send_qp
);
1871 list_add_tail(&qp
->cq_recv_list
, &recv_cq
->list_recv_qp
);
1872 mlx5_ib_unlock_cqs(send_cq
, recv_cq
);
1873 spin_unlock_irqrestore(&dev
->reset_flow_resource_lock
, flags
);
1878 if (qp
->create_type
== MLX5_QP_USER
)
1879 destroy_qp_user(dev
, pd
, qp
, base
);
1880 else if (qp
->create_type
== MLX5_QP_KERNEL
)
1881 destroy_qp_kernel(dev
, qp
);
1888 static void mlx5_ib_lock_cqs(struct mlx5_ib_cq
*send_cq
, struct mlx5_ib_cq
*recv_cq
)
1889 __acquires(&send_cq
->lock
) __acquires(&recv_cq
->lock
)
1893 if (send_cq
->mcq
.cqn
< recv_cq
->mcq
.cqn
) {
1894 spin_lock(&send_cq
->lock
);
1895 spin_lock_nested(&recv_cq
->lock
,
1896 SINGLE_DEPTH_NESTING
);
1897 } else if (send_cq
->mcq
.cqn
== recv_cq
->mcq
.cqn
) {
1898 spin_lock(&send_cq
->lock
);
1899 __acquire(&recv_cq
->lock
);
1901 spin_lock(&recv_cq
->lock
);
1902 spin_lock_nested(&send_cq
->lock
,
1903 SINGLE_DEPTH_NESTING
);
1906 spin_lock(&send_cq
->lock
);
1907 __acquire(&recv_cq
->lock
);
1909 } else if (recv_cq
) {
1910 spin_lock(&recv_cq
->lock
);
1911 __acquire(&send_cq
->lock
);
1913 __acquire(&send_cq
->lock
);
1914 __acquire(&recv_cq
->lock
);
1918 static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq
*send_cq
, struct mlx5_ib_cq
*recv_cq
)
1919 __releases(&send_cq
->lock
) __releases(&recv_cq
->lock
)
1923 if (send_cq
->mcq
.cqn
< recv_cq
->mcq
.cqn
) {
1924 spin_unlock(&recv_cq
->lock
);
1925 spin_unlock(&send_cq
->lock
);
1926 } else if (send_cq
->mcq
.cqn
== recv_cq
->mcq
.cqn
) {
1927 __release(&recv_cq
->lock
);
1928 spin_unlock(&send_cq
->lock
);
1930 spin_unlock(&send_cq
->lock
);
1931 spin_unlock(&recv_cq
->lock
);
1934 __release(&recv_cq
->lock
);
1935 spin_unlock(&send_cq
->lock
);
1937 } else if (recv_cq
) {
1938 __release(&send_cq
->lock
);
1939 spin_unlock(&recv_cq
->lock
);
1941 __release(&recv_cq
->lock
);
1942 __release(&send_cq
->lock
);
1946 static struct mlx5_ib_pd
*get_pd(struct mlx5_ib_qp
*qp
)
1948 return to_mpd(qp
->ibqp
.pd
);
1951 static void get_cqs(enum ib_qp_type qp_type
,
1952 struct ib_cq
*ib_send_cq
, struct ib_cq
*ib_recv_cq
,
1953 struct mlx5_ib_cq
**send_cq
, struct mlx5_ib_cq
**recv_cq
)
1956 case IB_QPT_XRC_TGT
:
1960 case MLX5_IB_QPT_REG_UMR
:
1961 case IB_QPT_XRC_INI
:
1962 *send_cq
= ib_send_cq
? to_mcq(ib_send_cq
) : NULL
;
1967 case MLX5_IB_QPT_HW_GSI
:
1971 case IB_QPT_RAW_IPV6
:
1972 case IB_QPT_RAW_ETHERTYPE
:
1973 case IB_QPT_RAW_PACKET
:
1974 *send_cq
= ib_send_cq
? to_mcq(ib_send_cq
) : NULL
;
1975 *recv_cq
= ib_recv_cq
? to_mcq(ib_recv_cq
) : NULL
;
1986 static int modify_raw_packet_qp(struct mlx5_ib_dev
*dev
, struct mlx5_ib_qp
*qp
,
1987 const struct mlx5_modify_raw_qp_param
*raw_qp_param
,
1988 u8 lag_tx_affinity
);
1990 static void destroy_qp_common(struct mlx5_ib_dev
*dev
, struct mlx5_ib_qp
*qp
)
1992 struct mlx5_ib_cq
*send_cq
, *recv_cq
;
1993 struct mlx5_ib_qp_base
*base
;
1994 unsigned long flags
;
1997 if (qp
->ibqp
.rwq_ind_tbl
) {
1998 destroy_rss_raw_qp_tir(dev
, qp
);
2002 base
= (qp
->ibqp
.qp_type
== IB_QPT_RAW_PACKET
||
2003 qp
->flags
& MLX5_IB_QP_UNDERLAY
) ?
2004 &qp
->raw_packet_qp
.rq
.base
:
2007 if (qp
->state
!= IB_QPS_RESET
) {
2008 if (qp
->ibqp
.qp_type
!= IB_QPT_RAW_PACKET
&&
2009 !(qp
->flags
& MLX5_IB_QP_UNDERLAY
)) {
2010 err
= mlx5_core_qp_modify(dev
->mdev
,
2011 MLX5_CMD_OP_2RST_QP
, 0,
2014 struct mlx5_modify_raw_qp_param raw_qp_param
= {
2015 .operation
= MLX5_CMD_OP_2RST_QP
2018 err
= modify_raw_packet_qp(dev
, qp
, &raw_qp_param
, 0);
2021 mlx5_ib_warn(dev
, "mlx5_ib: modify QP 0x%06x to RESET failed\n",
2025 get_cqs(qp
->ibqp
.qp_type
, qp
->ibqp
.send_cq
, qp
->ibqp
.recv_cq
,
2026 &send_cq
, &recv_cq
);
2028 spin_lock_irqsave(&dev
->reset_flow_resource_lock
, flags
);
2029 mlx5_ib_lock_cqs(send_cq
, recv_cq
);
2030 /* del from lists under both locks above to protect reset flow paths */
2031 list_del(&qp
->qps_list
);
2033 list_del(&qp
->cq_send_list
);
2036 list_del(&qp
->cq_recv_list
);
2038 if (qp
->create_type
== MLX5_QP_KERNEL
) {
2039 __mlx5_ib_cq_clean(recv_cq
, base
->mqp
.qpn
,
2040 qp
->ibqp
.srq
? to_msrq(qp
->ibqp
.srq
) : NULL
);
2041 if (send_cq
!= recv_cq
)
2042 __mlx5_ib_cq_clean(send_cq
, base
->mqp
.qpn
,
2045 mlx5_ib_unlock_cqs(send_cq
, recv_cq
);
2046 spin_unlock_irqrestore(&dev
->reset_flow_resource_lock
, flags
);
2048 if (qp
->ibqp
.qp_type
== IB_QPT_RAW_PACKET
||
2049 qp
->flags
& MLX5_IB_QP_UNDERLAY
) {
2050 destroy_raw_packet_qp(dev
, qp
);
2052 err
= mlx5_core_destroy_qp(dev
->mdev
, &base
->mqp
);
2054 mlx5_ib_warn(dev
, "failed to destroy QP 0x%x\n",
2058 if (qp
->create_type
== MLX5_QP_KERNEL
)
2059 destroy_qp_kernel(dev
, qp
);
2060 else if (qp
->create_type
== MLX5_QP_USER
)
2061 destroy_qp_user(dev
, &get_pd(qp
)->ibpd
, qp
, base
);
2064 static const char *ib_qp_type_str(enum ib_qp_type type
)
2068 return "IB_QPT_SMI";
2070 return "IB_QPT_GSI";
2077 case IB_QPT_RAW_IPV6
:
2078 return "IB_QPT_RAW_IPV6";
2079 case IB_QPT_RAW_ETHERTYPE
:
2080 return "IB_QPT_RAW_ETHERTYPE";
2081 case IB_QPT_XRC_INI
:
2082 return "IB_QPT_XRC_INI";
2083 case IB_QPT_XRC_TGT
:
2084 return "IB_QPT_XRC_TGT";
2085 case IB_QPT_RAW_PACKET
:
2086 return "IB_QPT_RAW_PACKET";
2087 case MLX5_IB_QPT_REG_UMR
:
2088 return "MLX5_IB_QPT_REG_UMR";
2091 return "Invalid QP type";
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev;
	struct mlx5_ib_qp *qp;
	u16 xrcdn = 0;
	int err;

	if (pd) {
		dev = to_mdev(pd->device);

		if (init_attr->qp_type == IB_QPT_RAW_PACKET) {
			if (!pd->uobject) {
				mlx5_ib_dbg(dev, "Raw Packet QP is not supported for kernel consumers\n");
				return ERR_PTR(-EINVAL);
			} else if (!to_mucontext(pd->uobject->context)->cqe_version) {
				mlx5_ib_dbg(dev, "Raw Packet QP is only supported for CQE version > 0\n");
				return ERR_PTR(-EINVAL);
			}
		}
	} else {
		/* being cautious here */
		if (init_attr->qp_type != IB_QPT_XRC_TGT &&
		    init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
			pr_warn("%s: no PD for transport %s\n", __func__,
				ib_qp_type_str(init_attr->qp_type));
			return ERR_PTR(-EINVAL);
		}
		dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
	}

	switch (init_attr->qp_type) {
	case IB_QPT_XRC_TGT:
	case IB_QPT_XRC_INI:
		if (!MLX5_CAP_GEN(dev->mdev, xrc)) {
			mlx5_ib_dbg(dev, "XRC not supported\n");
			return ERR_PTR(-ENOSYS);
		}
		init_attr->recv_cq = NULL;
		if (init_attr->qp_type == IB_QPT_XRC_TGT) {
			xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
			init_attr->send_cq = NULL;
		}

		/* fall through */
	case IB_QPT_RAW_PACKET:
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	case IB_QPT_SMI:
	case MLX5_IB_QPT_HW_GSI:
	case MLX5_IB_QPT_REG_UMR:
		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);

		err = create_qp_common(dev, pd, init_attr, udata, qp);
		if (err) {
			mlx5_ib_dbg(dev, "create_qp_common failed\n");
			kfree(qp);
			return ERR_PTR(err);
		}

		if (is_qp0(init_attr->qp_type))
			qp->ibqp.qp_num = 0;
		else if (is_qp1(init_attr->qp_type))
			qp->ibqp.qp_num = 1;
		else
			qp->ibqp.qp_num = qp->trans_qp.base.mqp.qpn;

		mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
			    qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn,
			    init_attr->recv_cq ? to_mcq(init_attr->recv_cq)->mcq.cqn : -1,
			    init_attr->send_cq ? to_mcq(init_attr->send_cq)->mcq.cqn : -1);

		qp->trans_qp.xrcdn = xrcdn;

		break;

	case IB_QPT_GSI:
		return mlx5_ib_gsi_create_qp(pd, init_attr);

	case IB_QPT_RAW_IPV6:
	case IB_QPT_RAW_ETHERTYPE:
	default:
		mlx5_ib_dbg(dev, "unsupported qp type %d\n",
			    init_attr->qp_type);
		/* Don't support raw QPs */
		return ERR_PTR(-EINVAL);
	}

	return &qp->ibqp;
}
int mlx5_ib_destroy_qp(struct ib_qp *qp)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_ib_qp *mqp = to_mqp(qp);

	if (unlikely(qp->qp_type == IB_QPT_GSI))
		return mlx5_ib_gsi_destroy_qp(qp);

	destroy_qp_common(dev, mqp);

	kfree(mqp);

	return 0;
}
static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr,
				   int attr_mask)
{
	u32 hw_access_flags = 0;
	u8 dest_rd_atomic;
	u32 access_flags;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->trans_qp.resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->trans_qp.atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MLX5_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= (MLX5_QP_BIT_RAE | MLX5_ATOMIC_MODE_CX);
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MLX5_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}
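
/*
 * Worked example (illustrative note, not part of the original source): with
 * IB_QP_ACCESS_FLAGS set in attr_mask and qp_access_flags =
 * IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE, a non-zero responder
 * depth yields cpu_to_be32(MLX5_QP_BIT_RRE | MLX5_QP_BIT_RWE).  When the
 * destination rd_atomic depth is zero, all remote access except write is
 * masked off first, so at most MLX5_QP_BIT_RWE survives the translation.
 */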
enum {
	MLX5_PATH_FLAG_FL	= 1 << 0,
	MLX5_PATH_FLAG_FREE_AR	= 1 << 1,
	MLX5_PATH_FLAG_COUNTER	= 1 << 2,
};

static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
{
	if (rate == IB_RATE_PORT_CURRENT) {
		return 0;
	} else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
		return -EINVAL;
	} else {
		while (rate != IB_RATE_2_5_GBPS &&
		       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
			 MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
			--rate;
	}

	return rate + MLX5_STAT_RATE_OFFSET;
}
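
/*
 * Note (added): the device encodes static rates at an offset
 * (MLX5_STAT_RATE_OFFSET) from the IB rate enum and advertises the
 * supported encodings as a bitmap in stat_rate_support.  The loop above
 * steps down from the requested rate toward IB_RATE_2_5_GBPS until it
 * finds an encoding whose bit is set, so an unsupported rate degrades to
 * a nearby slower supported rate instead of failing the modify.
 */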
static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
				      struct mlx5_ib_sq *sq, u8 sl)
{
	void *in;
	void *tisc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_tis_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_tis_in, in, bitmask.prio, 1);

	tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
	MLX5_SET(tisc, tisc, prio, ((sl & 0x7) << 1));

	err = mlx5_core_modify_tis(dev, sq->tisn, in, inlen);

	kvfree(in);

	return err;
}

static int modify_raw_packet_tx_affinity(struct mlx5_core_dev *dev,
					 struct mlx5_ib_sq *sq, u8 tx_affinity)
{
	void *in;
	void *tisc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_tis_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_tis_in, in, bitmask.lag_tx_port_affinity, 1);

	tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
	MLX5_SET(tisc, tisc, lag_tx_port_affinity, tx_affinity);

	err = mlx5_core_modify_tis(dev, sq->tisn, in, inlen);

	kvfree(in);

	return err;
}
static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
			 const struct rdma_ah_attr *ah,
			 struct mlx5_qp_path *path, u8 port, int attr_mask,
			 u32 path_flags, const struct ib_qp_attr *attr,
			 bool alt)
{
	const struct ib_global_route *grh = rdma_ah_read_grh(ah);
	int err;
	enum ib_gid_type gid_type;
	u8 ah_flags = rdma_ah_get_ah_flags(ah);
	u8 sl = rdma_ah_get_sl(ah);

	if (attr_mask & IB_QP_PKEY_INDEX)
		path->pkey_index = cpu_to_be16(alt ? attr->alt_pkey_index :
						     attr->pkey_index);

	if (ah_flags & IB_AH_GRH) {
		if (grh->sgid_index >=
		    dev->mdev->port_caps[port - 1].gid_table_len) {
			pr_err("sgid_index (%u) too large. max is %d\n",
			       grh->sgid_index,
			       dev->mdev->port_caps[port - 1].gid_table_len);
			return -EINVAL;
		}
	}

	if (ah->type == RDMA_AH_ATTR_TYPE_ROCE) {
		if (!(ah_flags & IB_AH_GRH))
			return -EINVAL;
		err = mlx5_get_roce_gid_type(dev, port, grh->sgid_index,
					     &gid_type);
		if (err)
			return err;
		memcpy(path->rmac, ah->roce.dmac, sizeof(ah->roce.dmac));
		if (qp->ibqp.qp_type == IB_QPT_RC ||
		    qp->ibqp.qp_type == IB_QPT_UC ||
		    qp->ibqp.qp_type == IB_QPT_XRC_INI ||
		    qp->ibqp.qp_type == IB_QPT_XRC_TGT)
			path->udp_sport = mlx5_get_roce_udp_sport(dev, port,
								  grh->sgid_index);
		path->dci_cfi_prio_sl = (sl & 0x7) << 4;
		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
			path->ecn_dscp = (grh->traffic_class >> 2) & 0x3f;
	} else {
		path->fl_free_ar = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
		path->fl_free_ar |=
			(path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x40 : 0;
		path->rlid = cpu_to_be16(rdma_ah_get_dlid(ah));
		path->grh_mlid = rdma_ah_get_path_bits(ah) & 0x7f;
		if (ah_flags & IB_AH_GRH)
			path->grh_mlid |= 1 << 7;
		path->dci_cfi_prio_sl = sl & 0xf;
	}

	if (ah_flags & IB_AH_GRH) {
		path->mgid_index = grh->sgid_index;
		path->hop_limit  = grh->hop_limit;
		path->tclass_flowlabel =
			cpu_to_be32((grh->traffic_class << 20) |
				    (grh->flow_label));
		memcpy(path->rgid, grh->dgid.raw, 16);
	}

	err = ib_rate_to_mlx5(dev, rdma_ah_get_static_rate(ah));
	if (err < 0)
		return err;
	path->static_rate = err;
	path->port = port;

	if (attr_mask & IB_QP_TIMEOUT)
		path->ackto_lt = (alt ? attr->alt_timeout : attr->timeout) << 3;

	if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt)
		return modify_raw_packet_eth_prio(dev->mdev,
						  &qp->raw_packet_qp.sq,
						  sl & 0xf);

	return 0;
}
static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = {
	[MLX5_QP_STATE_INIT] = {
		[MLX5_QP_STATE_INIT] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_PRI_PORT,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_PRI_PORT,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_Q_KEY		|
					  MLX5_QP_OPTPAR_PRI_PORT,
		},
		[MLX5_QP_STATE_RTR] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX	|
					   MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
					   MLX5_QP_OPTPAR_RRE		|
					   MLX5_QP_OPTPAR_RAE		|
					   MLX5_QP_OPTPAR_RWE		|
					   MLX5_QP_OPTPAR_PKEY_INDEX,
		},
	},
	[MLX5_QP_STATE_RTR] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_RNR_TIMEOUT,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
		},
	},
	[MLX5_QP_STATE_RTS] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_RNR_TIMEOUT	|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY		|
					  MLX5_QP_OPTPAR_SRQN		|
					  MLX5_QP_OPTPAR_CQN_RCV,
		},
	},
	[MLX5_QP_STATE_SQER] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_UD]	 = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_UC]	 = MLX5_QP_OPTPAR_RWE,
			[MLX5_QP_ST_RC]	 = MLX5_QP_OPTPAR_RNR_TIMEOUT	|
					   MLX5_QP_OPTPAR_RWE		|
					   MLX5_QP_OPTPAR_RAE		|
					   MLX5_QP_OPTPAR_RRE,
		},
	},
};
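
/*
 * Note (added): opt_mask is indexed [current state][next state][transport]
 * and lists the optional parameters the firmware accepts for a given
 * transition.  __mlx5_ib_modify_qp() below ANDs the optpar bits derived
 * from the verbs attr_mask with the matching entry, so attributes that are
 * not legal for the transition are dropped from the command.
 */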
static int ib_nr_to_mlx5_nr(int ib_mask)
{
	switch (ib_mask) {
	case IB_QP_STATE:
		return 0;
	case IB_QP_CUR_STATE:
		return 0;
	case IB_QP_EN_SQD_ASYNC_NOTIFY:
		return 0;
	case IB_QP_ACCESS_FLAGS:
		return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE |
			MLX5_QP_OPTPAR_RAE;
	case IB_QP_PKEY_INDEX:
		return MLX5_QP_OPTPAR_PKEY_INDEX;
	case IB_QP_PORT:
		return MLX5_QP_OPTPAR_PRI_PORT;
	case IB_QP_QKEY:
		return MLX5_QP_OPTPAR_Q_KEY;
	case IB_QP_AV:
		return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH |
			MLX5_QP_OPTPAR_PRI_PORT;
	case IB_QP_PATH_MTU:
		return 0;
	case IB_QP_TIMEOUT:
		return MLX5_QP_OPTPAR_ACK_TIMEOUT;
	case IB_QP_RETRY_CNT:
		return MLX5_QP_OPTPAR_RETRY_COUNT;
	case IB_QP_RNR_RETRY:
		return MLX5_QP_OPTPAR_RNR_RETRY;
	case IB_QP_RQ_PSN:
		return 0;
	case IB_QP_MAX_QP_RD_ATOMIC:
		return MLX5_QP_OPTPAR_SRA_MAX;
	case IB_QP_ALT_PATH:
		return MLX5_QP_OPTPAR_ALT_ADDR_PATH;
	case IB_QP_MIN_RNR_TIMER:
		return MLX5_QP_OPTPAR_RNR_TIMEOUT;
	case IB_QP_SQ_PSN:
		return 0;
	case IB_QP_MAX_DEST_RD_ATOMIC:
		return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE |
			MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE;
	case IB_QP_PATH_MIG_STATE:
		return MLX5_QP_OPTPAR_PM_STATE;
	case IB_QP_CAP:
		return 0;
	case IB_QP_DEST_QPN:
		return 0;
	}
	return 0;
}

static int ib_mask_to_mlx5_opt(int ib_mask)
{
	int result = 0;
	int i;

	for (i = 0; i < 8 * sizeof(int); i++) {
		if ((1 << i) & ib_mask)
			result |= ib_nr_to_mlx5_nr(1 << i);
	}

	return result;
}
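
/*
 * Worked example (illustrative note): for attr_mask = IB_QP_STATE |
 * IB_QP_ACCESS_FLAGS the loop visits each set bit; IB_QP_STATE maps to 0
 * and IB_QP_ACCESS_FLAGS maps to MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE |
 * MLX5_QP_OPTPAR_RAE, so that value is the final result.
 */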
static int modify_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
				   struct mlx5_ib_rq *rq, int new_state,
				   const struct mlx5_modify_raw_qp_param *raw_qp_param)
{
	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_rq_in, in, rq_state, rq->state);

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
	MLX5_SET(rqc, rqc, state, new_state);

	if (raw_qp_param->set_mask & MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID) {
		if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) {
			MLX5_SET64(modify_rq_in, in, modify_bitmask,
				   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
			MLX5_SET(rqc, rqc, counter_set_id, raw_qp_param->rq_q_ctr_id);
		} else
			pr_info_once("%s: RAW PACKET QP counters are not supported on current FW\n",
				     dev->ib_dev.name);
	}

	err = mlx5_core_modify_rq(dev->mdev, rq->base.mqp.qpn, in, inlen);
	if (err)
		goto out;

	rq->state = new_state;

out:
	kvfree(in);
	return err;
}
static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev,
				   struct mlx5_ib_sq *sq,
				   int new_state,
				   const struct mlx5_modify_raw_qp_param *raw_qp_param)
{
	struct mlx5_ib_qp *ibqp = sq->base.container_mibqp;
	u32 old_rate = ibqp->rate_limit;
	u32 new_rate = old_rate;
	u16 rl_index = 0;
	void *in;
	void *sqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_sq_in, in, sq_state, sq->state);

	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
	MLX5_SET(sqc, sqc, state, new_state);

	if (raw_qp_param->set_mask & MLX5_RAW_QP_RATE_LIMIT) {
		if (new_state != MLX5_SQC_STATE_RDY)
			pr_warn("%s: Rate limit can only be changed when SQ is moving to RDY\n",
				__func__);
		else
			new_rate = raw_qp_param->rate_limit;
	}

	if (old_rate != new_rate) {
		if (new_rate) {
			err = mlx5_rl_add_rate(dev, new_rate, &rl_index);
			if (err) {
				pr_err("Failed configuring rate %u: %d\n",
				       new_rate, err);
				goto out;
			}
		}

		MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
		MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);
	}

	err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in, inlen);
	if (err) {
		/* Remove new rate from table if failed */
		if (new_rate &&
		    old_rate != new_rate)
			mlx5_rl_remove_rate(dev, new_rate);
		goto out;
	}

	/* Only remove the old rate after new rate was set */
	if ((old_rate &&
	     (old_rate != new_rate)) ||
	    (new_state != MLX5_SQC_STATE_RDY))
		mlx5_rl_remove_rate(dev, old_rate);

	ibqp->rate_limit = new_rate;
	sq->state = new_state;

out:
	kvfree(in);
	return err;
}
static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
				const struct mlx5_modify_raw_qp_param *raw_qp_param,
				u8 tx_affinity)
{
	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
	int modify_rq = !!qp->rq.wqe_cnt;
	int modify_sq = !!qp->sq.wqe_cnt;
	int rq_state;
	int sq_state;
	int err;

	switch (raw_qp_param->operation) {
	case MLX5_CMD_OP_RST2INIT_QP:
		rq_state = MLX5_RQC_STATE_RDY;
		sq_state = MLX5_SQC_STATE_RDY;
		break;
	case MLX5_CMD_OP_2ERR_QP:
		rq_state = MLX5_RQC_STATE_ERR;
		sq_state = MLX5_SQC_STATE_ERR;
		break;
	case MLX5_CMD_OP_2RST_QP:
		rq_state = MLX5_RQC_STATE_RST;
		sq_state = MLX5_SQC_STATE_RST;
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
		if (raw_qp_param->set_mask ==
		    MLX5_RAW_QP_RATE_LIMIT) {
			modify_rq = 0;
			sq_state = sq->state;
		} else {
			return raw_qp_param->set_mask ? -EINVAL : 0;
		}
		break;
	case MLX5_CMD_OP_INIT2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
		if (raw_qp_param->set_mask)
			return -EINVAL;
		else
			return 0;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	if (modify_rq) {
		err = modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param);
		if (err)
			return err;
	}

	if (modify_sq) {
		if (tx_affinity) {
			err = modify_raw_packet_tx_affinity(dev->mdev, sq,
							    tx_affinity);
			if (err)
				return err;
		}

		return modify_raw_packet_qp_sq(dev->mdev, sq, sq_state, raw_qp_param);
	}

	return 0;
}
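
/*
 * Note (added): a raw packet QP has no monolithic QP context in the
 * device; it is a software aggregate of a separate RQ and SQ (plus the
 * SQ's TIS).  A single verbs modify is therefore fanned out here: the QP
 * operation is first mapped to matching RQ/SQ states, then applied to
 * each object that actually carries WQEs, with LAG tx port affinity
 * programmed through the TIS.
 */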
static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
			       const struct ib_qp_attr *attr, int attr_mask,
			       enum ib_qp_state cur_state, enum ib_qp_state new_state)
{
	static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = {
		[MLX5_QP_STATE_RST] = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_INIT]	= MLX5_CMD_OP_RST2INIT_QP,
		},
		[MLX5_QP_STATE_INIT]  = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_INIT]	= MLX5_CMD_OP_INIT2INIT_QP,
			[MLX5_QP_STATE_RTR]	= MLX5_CMD_OP_INIT2RTR_QP,
		},
		[MLX5_QP_STATE_RTR]   = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_RTS]	= MLX5_CMD_OP_RTR2RTS_QP,
		},
		[MLX5_QP_STATE_RTS]   = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_RTS]	= MLX5_CMD_OP_RTS2RTS_QP,
		},
		[MLX5_QP_STATE_SQD] = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
		},
		[MLX5_QP_STATE_SQER] = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_RTS]	= MLX5_CMD_OP_SQERR2RTS_QP,
		},
		[MLX5_QP_STATE_ERR] = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
		},
	};

	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
	struct mlx5_ib_cq *send_cq, *recv_cq;
	struct mlx5_qp_context *context;
	struct mlx5_ib_pd *pd;
	struct mlx5_ib_port *mibport = NULL;
	enum mlx5_qp_state mlx5_cur, mlx5_new;
	enum mlx5_qp_optpar optpar;
	int mlx5_st;
	int err;
	u16 op;
	u8 tx_affinity = 0;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	err = to_mlx5_st(ibqp->qp_type);
	if (err < 0) {
		mlx5_ib_dbg(dev, "unsupported qp type %d\n", ibqp->qp_type);
		goto out;
	}

	context->flags = cpu_to_be32(err << 16);

	if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
		context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
	} else {
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			context->flags |= cpu_to_be32(MLX5_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			context->flags |= cpu_to_be32(MLX5_QP_PM_ARMED << 11);
			break;
		}
	}

	if ((cur_state == IB_QPS_RESET) && (new_state == IB_QPS_INIT)) {
		if ((ibqp->qp_type == IB_QPT_RC) ||
		    (ibqp->qp_type == IB_QPT_UD &&
		     !(qp->flags & MLX5_IB_QP_SQPN_QP1)) ||
		    (ibqp->qp_type == IB_QPT_UC) ||
		    (ibqp->qp_type == IB_QPT_RAW_PACKET) ||
		    (ibqp->qp_type == IB_QPT_XRC_INI) ||
		    (ibqp->qp_type == IB_QPT_XRC_TGT)) {
			if (mlx5_lag_is_active(dev->mdev)) {
				tx_affinity = (unsigned int)atomic_add_return(1,
						&dev->roce.next_port) %
						MLX5_MAX_PORTS + 1;
				context->flags |= cpu_to_be32(tx_affinity << 24);
			}
		}
	}

	if (is_sqp(ibqp->qp_type)) {
		context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
	} else if ((ibqp->qp_type == IB_QPT_UD &&
		    !(qp->flags & MLX5_IB_QP_UNDERLAY)) ||
		   ibqp->qp_type == MLX5_IB_QPT_REG_UMR) {
		context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
	} else if (attr_mask & IB_QP_PATH_MTU) {
		if (attr->path_mtu < IB_MTU_256 ||
		    attr->path_mtu > IB_MTU_4096) {
			mlx5_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu);
			err = -EINVAL;
			goto out;
		}
		context->mtu_msgmax = (attr->path_mtu << 5) |
				      (u8)MLX5_CAP_GEN(dev->mdev, log_max_msg);
	}

	if (attr_mask & IB_QP_DEST_QPN)
		context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);

	if (attr_mask & IB_QP_PKEY_INDEX)
		context->pri_path.pkey_index = cpu_to_be16(attr->pkey_index);

	/* todo implement counter_index functionality */

	if (is_sqp(ibqp->qp_type))
		context->pri_path.port = qp->port;

	if (attr_mask & IB_QP_PORT)
		context->pri_path.port = attr->port_num;

	if (attr_mask & IB_QP_AV) {
		err = mlx5_set_path(dev, qp, &attr->ah_attr, &context->pri_path,
				    attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
				    attr_mask, 0, attr, false);
		if (err)
			goto out;
	}

	if (attr_mask & IB_QP_TIMEOUT)
		context->pri_path.ackto_lt |= attr->timeout << 3;

	if (attr_mask & IB_QP_ALT_PATH) {
		err = mlx5_set_path(dev, qp, &attr->alt_ah_attr,
				    &context->alt_path,
				    attr->alt_port_num,
				    attr_mask | IB_QP_PKEY_INDEX | IB_QP_TIMEOUT,
				    0, attr, true);
		if (err)
			goto out;
	}

	pd = get_pd(qp);
	get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
		&send_cq, &recv_cq);

	context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
	context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0;
	context->cqn_recv = recv_cq ? cpu_to_be32(recv_cq->mcq.cqn) : 0;
	context->params1  = cpu_to_be32(MLX5_IB_ACK_REQ_FREQ << 28);

	if (attr_mask & IB_QP_RNR_RETRY)
		context->params1 |= cpu_to_be32(attr->rnr_retry << 13);

	if (attr_mask & IB_QP_RETRY_CNT)
		context->params1 |= cpu_to_be32(attr->retry_cnt << 16);

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic)
			context->params1 |=
				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
	}

	if (attr_mask & IB_QP_SQ_PSN)
		context->next_send_psn = cpu_to_be32(attr->sq_psn);

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic)
			context->params2 |=
				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
		context->params2 |= to_mlx5_access_flags(qp, attr, attr_mask);

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);

	if (attr_mask & IB_QP_RQ_PSN)
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	if (attr_mask & IB_QP_QKEY)
		context->qkey = cpu_to_be32(attr->qkey);

	if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->db_rec_addr = cpu_to_be64(qp->db.dma);

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		u8 port_num = (attr_mask & IB_QP_PORT ? attr->port_num :
			       qp->port) - 1;

		/* Underlay port should be used - index 0 function per port */
		if (qp->flags & MLX5_IB_QP_UNDERLAY)
			port_num = 0;

		mibport = &dev->port[port_num];
		context->qp_counter_set_usr_page |=
			cpu_to_be32((u32)(mibport->cnts.set_id) << 24);
	}

	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->sq_crq_size |= cpu_to_be16(1 << 4);

	if (qp->flags & MLX5_IB_QP_SQPN_QP1)
		context->deth_sqpn = cpu_to_be32(1);

	mlx5_cur = to_mlx5_state(cur_state);
	mlx5_new = to_mlx5_state(new_state);
	mlx5_st = to_mlx5_st(ibqp->qp_type);
	if (mlx5_st < 0)
		goto out;

	err = -EINVAL;
	if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE ||
	    !optab[mlx5_cur][mlx5_new])
		goto out;

	op = optab[mlx5_cur][mlx5_new];
	optpar = ib_mask_to_mlx5_opt(attr_mask);
	optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];

	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
	    qp->flags & MLX5_IB_QP_UNDERLAY) {
		struct mlx5_modify_raw_qp_param raw_qp_param = {};

		raw_qp_param.operation = op;
		if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
			raw_qp_param.rq_q_ctr_id = mibport->cnts.set_id;
			raw_qp_param.set_mask |= MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID;
		}

		if (attr_mask & IB_QP_RATE_LIMIT) {
			raw_qp_param.rate_limit = attr->rate_limit;
			raw_qp_param.set_mask |= MLX5_RAW_QP_RATE_LIMIT;
		}

		err = modify_raw_packet_qp(dev, qp, &raw_qp_param, tx_affinity);
	} else {
		err = mlx5_core_qp_modify(dev->mdev, op, optpar, context,
					  &base->mqp);
	}

	if (err)
		goto out;

	qp->state = new_state;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->trans_qp.atomic_rd_en = attr->qp_access_flags;
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->trans_qp.resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT)
		qp->port = attr->port_num;
	if (attr_mask & IB_QP_ALT_PATH)
		qp->trans_qp.alt_port = attr->alt_port_num;

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (new_state == IB_QPS_RESET && !ibqp->uobject) {
		mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
				 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
		if (send_cq != recv_cq)
			mlx5_ib_cq_clean(send_cq, base->mqp.qpn, NULL);

		qp->rq.head = 0;
		qp->rq.tail = 0;
		qp->sq.head = 0;
		qp->sq.tail = 0;
		qp->sq.cur_post = 0;
		qp->sq.last_poll = 0;
		qp->db.db[MLX5_RCV_DBR] = 0;
		qp->db.db[MLX5_SND_DBR] = 0;
	}

out:
	kfree(context);
	return err;
}
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	enum ib_qp_type qp_type;
	enum ib_qp_state cur_state, new_state;
	int err = -EINVAL;
	int port;
	enum rdma_link_layer ll = IB_LINK_LAYER_UNSPECIFIED;

	if (ibqp->rwq_ind_tbl)
		return -ENOSYS;

	if (unlikely(ibqp->qp_type == IB_QPT_GSI))
		return mlx5_ib_gsi_modify_qp(ibqp, attr, attr_mask);

	qp_type = (unlikely(ibqp->qp_type == MLX5_IB_QPT_HW_GSI)) ?
		IB_QPT_GSI : ibqp->qp_type;

	mutex_lock(&qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!(cur_state == new_state && cur_state == IB_QPS_RESET)) {
		port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
		ll = dev->ib_dev.get_link_layer(&dev->ib_dev, port);
	}

	if (qp->flags & MLX5_IB_QP_UNDERLAY) {
		if (attr_mask & ~(IB_QP_STATE | IB_QP_CUR_STATE)) {
			mlx5_ib_dbg(dev, "invalid attr_mask 0x%x when underlay QP is used\n",
				    attr_mask);
			goto out;
		}
	} else if (qp_type != MLX5_IB_QPT_REG_UMR &&
		   !ib_modify_qp_is_ok(cur_state, new_state, qp_type, attr_mask, ll)) {
		mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
			    cur_state, new_state, ibqp->qp_type, attr_mask);
		goto out;
	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 ||
	     attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports))) {
		mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n",
			    attr->port_num, dev->num_ports);
		goto out;
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
		if (attr->pkey_index >=
		    dev->mdev->port_caps[port - 1].pkey_table_len) {
			mlx5_ib_dbg(dev, "invalid pkey index %d\n",
				    attr->pkey_index);
			goto out;
		}
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic >
	    (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp))) {
		mlx5_ib_dbg(dev, "invalid max_rd_atomic value %d\n",
			    attr->max_rd_atomic);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic >
	    (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp))) {
		mlx5_ib_dbg(dev, "invalid max_dest_rd_atomic value %d\n",
			    attr->max_dest_rd_atomic);
		goto out;
	}

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		err = 0;
		goto out;
	}

	err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);

out:
	mutex_unlock(&qp->mutex);
	return err;
}
static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
{
	struct mlx5_ib_cq *cq;
	unsigned cur;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max_post))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max_post;
}
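
/*
 * Note (added): head and tail are free-running unsigned counters, so
 * "wq->head - wq->tail" yields the number of outstanding WQEs even after
 * wraparound.  The lock/unlock of the CQ on the slow path orders the
 * re-read against completion processing, which advances wq->tail.
 */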
static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->raddr    = cpu_to_be64(remote_addr);
	rseg->rkey     = cpu_to_be32(rkey);
	rseg->reserved = 0;
}
static void *set_eth_seg(struct mlx5_wqe_eth_seg *eseg,
			 struct ib_send_wr *wr, void *qend,
			 struct mlx5_ib_qp *qp, int *size)
{
	void *seg = eseg;

	memset(eseg, 0, sizeof(struct mlx5_wqe_eth_seg));

	if (wr->send_flags & IB_SEND_IP_CSUM)
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM |
				 MLX5_ETH_WQE_L4_CSUM;

	seg += sizeof(struct mlx5_wqe_eth_seg);
	*size += sizeof(struct mlx5_wqe_eth_seg) / 16;

	if (wr->opcode == IB_WR_LSO) {
		struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr);
		int size_of_inl_hdr_start = sizeof(eseg->inline_hdr.start);
		u64 left, leftlen, copysz;
		void *pdata = ud_wr->header;

		left = ud_wr->hlen;
		eseg->mss = cpu_to_be16(ud_wr->mss);
		eseg->inline_hdr.sz = cpu_to_be16(left);

		/*
		 * check if there is space till the end of queue, if yes,
		 * copy all in one shot, otherwise copy till the end of queue,
		 * rollback and then copy the rest
		 */
		leftlen = qend - (void *)eseg->inline_hdr.start;
		copysz = min_t(u64, leftlen, left);

		memcpy(seg - size_of_inl_hdr_start, pdata, copysz);

		if (likely(copysz > size_of_inl_hdr_start)) {
			seg += ALIGN(copysz - size_of_inl_hdr_start, 16);
			*size += ALIGN(copysz - size_of_inl_hdr_start, 16) / 16;
		}

		if (unlikely(copysz < left)) { /* the last wqe in the queue */
			seg = mlx5_get_send_wqe(qp, 0);
			left -= copysz;
			pdata += copysz;
			memcpy(seg, pdata, left);
			seg += ALIGN(left, 16);
			*size += ALIGN(left, 16) / 16;
		}
	}

	return seg;
}
static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
			     struct ib_send_wr *wr)
{
	memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
	dseg->av.dqp_dct = cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
	dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey);
}

static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->byte_count = cpu_to_be32(sg->length);
	dseg->lkey       = cpu_to_be32(sg->lkey);
	dseg->addr       = cpu_to_be64(sg->addr);
}
static u64 get_xlt_octo(u64 bytes)
{
	return ALIGN(bytes, MLX5_IB_UMR_XLT_ALIGNMENT) /
	       MLX5_IB_UMR_OCTOWORD;
}
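
/*
 * Worked example (illustrative note, assuming the driver's usual 64-byte
 * XLT alignment and 16-byte octowords): get_xlt_octo(100) =
 * ALIGN(100, 64) / 16 = 128 / 16 = 8 octowords.
 */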
static __be64 frwr_mkey_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_LEN		|
		MLX5_MKEY_MASK_PAGE_SIZE	|
		MLX5_MKEY_MASK_START_ADDR	|
		MLX5_MKEY_MASK_EN_RINVAL	|
		MLX5_MKEY_MASK_KEY		|
		MLX5_MKEY_MASK_LR		|
		MLX5_MKEY_MASK_LW		|
		MLX5_MKEY_MASK_RR		|
		MLX5_MKEY_MASK_RW		|
		MLX5_MKEY_MASK_A		|
		MLX5_MKEY_MASK_SMALL_FENCE	|
		MLX5_MKEY_MASK_FREE;

	return cpu_to_be64(result);
}

static __be64 sig_mkey_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_LEN		|
		MLX5_MKEY_MASK_PAGE_SIZE	|
		MLX5_MKEY_MASK_START_ADDR	|
		MLX5_MKEY_MASK_EN_SIGERR	|
		MLX5_MKEY_MASK_EN_RINVAL	|
		MLX5_MKEY_MASK_KEY		|
		MLX5_MKEY_MASK_LR		|
		MLX5_MKEY_MASK_LW		|
		MLX5_MKEY_MASK_RR		|
		MLX5_MKEY_MASK_RW		|
		MLX5_MKEY_MASK_SMALL_FENCE	|
		MLX5_MKEY_MASK_FREE		|
		MLX5_MKEY_MASK_BSF_EN;

	return cpu_to_be64(result);
}
static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
			    struct mlx5_ib_mr *mr)
{
	int size = mr->ndescs * mr->desc_size;

	memset(umr, 0, sizeof(*umr));

	umr->flags = MLX5_UMR_CHECK_NOT_FREE;
	umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
	umr->mkey_mask = frwr_mkey_mask();
}

static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
{
	memset(umr, 0, sizeof(*umr));
	umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
	umr->flags = MLX5_UMR_INLINE;
}

static __be64 get_umr_enable_mr_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_KEY |
		 MLX5_MKEY_MASK_FREE;

	return cpu_to_be64(result);
}

static __be64 get_umr_disable_mr_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_FREE;

	return cpu_to_be64(result);
}

static __be64 get_umr_update_translation_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_LEN |
		 MLX5_MKEY_MASK_PAGE_SIZE |
		 MLX5_MKEY_MASK_START_ADDR;

	return cpu_to_be64(result);
}

static __be64 get_umr_update_access_mask(int atomic)
{
	u64 result;

	result = MLX5_MKEY_MASK_LR |
		 MLX5_MKEY_MASK_LW |
		 MLX5_MKEY_MASK_RR |
		 MLX5_MKEY_MASK_RW;

	if (atomic)
		result |= MLX5_MKEY_MASK_A;

	return cpu_to_be64(result);
}

static __be64 get_umr_update_pd_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_PD;

	return cpu_to_be64(result);
}
static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
				struct ib_send_wr *wr, int atomic)
{
	struct mlx5_umr_wr *umrwr = umr_wr(wr);

	memset(umr, 0, sizeof(*umr));

	if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
		umr->flags = MLX5_UMR_CHECK_FREE; /* fail if free */
	else
		umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */

	umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size));
	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) {
		u64 offset = get_xlt_octo(umrwr->offset);

		umr->xlt_offset = cpu_to_be16(offset & 0xffff);
		umr->xlt_offset_47_16 = cpu_to_be32(offset >> 16);
		umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
	}
	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION)
		umr->mkey_mask |= get_umr_update_translation_mask();
	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS) {
		umr->mkey_mask |= get_umr_update_access_mask(atomic);
		umr->mkey_mask |= get_umr_update_pd_mask();
	}
	if (wr->send_flags & MLX5_IB_SEND_UMR_ENABLE_MR)
		umr->mkey_mask |= get_umr_enable_mr_mask();
	if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
		umr->mkey_mask |= get_umr_disable_mr_mask();

	if (!wr->num_sge)
		umr->flags |= MLX5_UMR_INLINE;
}
static u8 get_umr_flags(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
		MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN;
}

static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg,
			     struct mlx5_ib_mr *mr,
			     u32 key, int access)
{
	int ndescs = ALIGN(mr->ndescs, 8) >> 1;

	memset(seg, 0, sizeof(*seg));

	if (mr->access_mode == MLX5_MKC_ACCESS_MODE_MTT)
		seg->log2_page_size = ilog2(mr->ibmr.page_size);
	else if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
		/* KLMs take twice the size of MTTs */
		ndescs *= 2;

	seg->flags = get_umr_flags(access) | mr->access_mode;
	seg->qpn_mkey7_0 = cpu_to_be32((key & 0xff) | 0xffffff00);
	seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
	seg->start_addr = cpu_to_be64(mr->ibmr.iova);
	seg->len = cpu_to_be64(mr->ibmr.length);
	seg->xlt_oct_size = cpu_to_be32(ndescs);
}
static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg)
{
	memset(seg, 0, sizeof(*seg));
	seg->status = MLX5_MKEY_STATUS_FREE;
}

static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr)
{
	struct mlx5_umr_wr *umrwr = umr_wr(wr);

	memset(seg, 0, sizeof(*seg));
	if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
		seg->status = MLX5_MKEY_STATUS_FREE;

	seg->flags = convert_access(umrwr->access_flags);
	if (umrwr->pd)
		seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION &&
	    !umrwr->length)
		seg->flags_pd |= cpu_to_be32(MLX5_MKEY_LEN64);

	seg->start_addr = cpu_to_be64(umrwr->virt_addr);
	seg->len = cpu_to_be64(umrwr->length);
	seg->log2_page_size = umrwr->page_shift;
	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
				       mlx5_mkey_variant(umrwr->mkey));
}
static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg,
			     struct mlx5_ib_mr *mr,
			     struct mlx5_ib_pd *pd)
{
	int bcount = mr->desc_size * mr->ndescs;

	dseg->addr = cpu_to_be64(mr->desc_map);
	dseg->byte_count = cpu_to_be32(ALIGN(bcount, 64));
	dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
}

static __be32 send_ieth(struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return wr->ex.imm_data;

	case IB_WR_SEND_WITH_INV:
		return cpu_to_be32(wr->ex.invalidate_rkey);

	default:
		return 0;
	}
}
static u8 calc_sig(void *wqe, int size)
{
	u8 *p = wqe;
	u8 res = 0;
	int i;

	for (i = 0; i < size; i++)
		res ^= p[i];

	return ~res;
}

static u8 wq_sig(void *wqe)
{
	return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
}

static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr,
			    void *wqe, int *sz)
{
	struct mlx5_wqe_inline_seg *seg;
	void *qend = qp->sq.qend;
	void *addr;
	int inl = 0;
	int copy;
	int len;
	int i;

	seg = wqe;
	wqe += sizeof(*seg);
	for (i = 0; i < wr->num_sge; i++) {
		addr = (void *)(unsigned long)(wr->sg_list[i].addr);
		len  = wr->sg_list[i].length;
		inl += len;

		if (unlikely(inl > qp->max_inline_data))
			return -ENOMEM;

		if (unlikely(wqe + len > qend)) {
			copy = qend - wqe;
			memcpy(wqe, addr, copy);
			addr += copy;
			len -= copy;
			wqe = mlx5_get_send_wqe(qp, 0);
		}
		memcpy(wqe, addr, len);
		wqe += len;
	}

	seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);

	*sz = ALIGN(inl + sizeof(seg->byte_count), 16) / 16;

	return 0;
}
static u16 prot_field_size(enum ib_signature_type type)
{
	switch (type) {
	case IB_SIG_TYPE_T10_DIF:
		return MLX5_DIF_SIZE;
	default:
		return 0;
	}
}

static u8 bs_selector(int block_size)
{
	switch (block_size) {
	case 512:	    return 0x1;
	case 520:	    return 0x2;
	case 4096:	    return 0x3;
	case 4160:	    return 0x4;
	case 1073741824:    return 0x5;
	default:	    return 0;
	}
}
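
/*
 * Note (added): the selector values cover block sizes common in storage
 * protection schemes - 512- and 4096-byte sectors, their 520- and
 * 4160-byte protection-information variants, and a 1 GB repeating block;
 * anything else falls through to 0 (no valid selector).
 */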
static void mlx5_fill_inl_bsf(struct ib_sig_domain *domain,
			      struct mlx5_bsf_inl *inl)
{
	/* Valid inline section and allow BSF refresh */
	inl->vld_refresh = cpu_to_be16(MLX5_BSF_INL_VALID |
				       MLX5_BSF_REFRESH_DIF);
	inl->dif_apptag = cpu_to_be16(domain->sig.dif.app_tag);
	inl->dif_reftag = cpu_to_be32(domain->sig.dif.ref_tag);
	/* repeating block */
	inl->rp_inv_seed = MLX5_BSF_REPEAT_BLOCK;
	inl->sig_type = domain->sig.dif.bg_type == IB_T10DIF_CRC ?
			MLX5_DIF_CRC : MLX5_DIF_IPCS;

	if (domain->sig.dif.ref_remap)
		inl->dif_inc_ref_guard_check |= MLX5_BSF_INC_REFTAG;

	if (domain->sig.dif.app_escape) {
		if (domain->sig.dif.ref_escape)
			inl->dif_inc_ref_guard_check |= MLX5_BSF_APPREF_ESCAPE;
		else
			inl->dif_inc_ref_guard_check |= MLX5_BSF_APPTAG_ESCAPE;
	}

	inl->dif_app_bitmask_check =
		cpu_to_be16(domain->sig.dif.apptag_check_mask);
}
static int mlx5_set_bsf(struct ib_mr *sig_mr,
			struct ib_sig_attrs *sig_attrs,
			struct mlx5_bsf *bsf, u32 data_size)
{
	struct mlx5_core_sig_ctx *msig = to_mmr(sig_mr)->sig;
	struct mlx5_bsf_basic *basic = &bsf->basic;
	struct ib_sig_domain *mem = &sig_attrs->mem;
	struct ib_sig_domain *wire = &sig_attrs->wire;

	memset(bsf, 0, sizeof(*bsf));

	/* Basic + Extended + Inline */
	basic->bsf_size_sbs = 1 << 7;
	/* Input domain check byte mask */
	basic->check_byte_mask = sig_attrs->check_mask;
	basic->raw_data_size = cpu_to_be32(data_size);

	/* Memory domain */
	switch (sig_attrs->mem.sig_type) {
	case IB_SIG_TYPE_NONE:
		break;
	case IB_SIG_TYPE_T10_DIF:
		basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval);
		basic->m_bfs_psv = cpu_to_be32(msig->psv_memory.psv_idx);
		mlx5_fill_inl_bsf(mem, &bsf->m_inl);
		break;
	default:
		return -EINVAL;
	}

	/* Wire domain */
	switch (sig_attrs->wire.sig_type) {
	case IB_SIG_TYPE_NONE:
		break;
	case IB_SIG_TYPE_T10_DIF:
		if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval &&
		    mem->sig_type == wire->sig_type) {
			/* Same block structure */
			basic->bsf_size_sbs |= 1 << 4;
			if (mem->sig.dif.bg_type == wire->sig.dif.bg_type)
				basic->wire.copy_byte_mask |= MLX5_CPY_GRD_MASK;
			if (mem->sig.dif.app_tag == wire->sig.dif.app_tag)
				basic->wire.copy_byte_mask |= MLX5_CPY_APP_MASK;
			if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag)
				basic->wire.copy_byte_mask |= MLX5_CPY_REF_MASK;
		} else
			basic->wire.bs_selector = bs_selector(wire->sig.dif.pi_interval);

		basic->w_bfs_psv = cpu_to_be32(msig->psv_wire.psv_idx);
		mlx5_fill_inl_bsf(wire, &bsf->w_inl);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int set_sig_data_segment(struct ib_sig_handover_wr *wr,
				struct mlx5_ib_qp *qp, void **seg, int *size)
{
	struct ib_sig_attrs *sig_attrs = wr->sig_attrs;
	struct ib_mr *sig_mr = wr->sig_mr;
	struct mlx5_bsf *bsf;
	u32 data_len = wr->wr.sg_list->length;
	u32 data_key = wr->wr.sg_list->lkey;
	u64 data_va = wr->wr.sg_list->addr;
	int ret;
	int wqe_size;

	if (!wr->prot ||
	    (data_key == wr->prot->lkey &&
	     data_va == wr->prot->addr &&
	     data_len == wr->prot->length)) {
		/**
		 * Source domain doesn't contain signature information
		 * or data and protection are interleaved in memory.
		 * So need construct:
		 *                  ------------------
		 *                 |     data_klm     |
		 *                  ------------------
		 *                 |       BSF        |
		 *                  ------------------
		 **/
		struct mlx5_klm *data_klm = *seg;

		data_klm->bcount = cpu_to_be32(data_len);
		data_klm->key = cpu_to_be32(data_key);
		data_klm->va = cpu_to_be64(data_va);
		wqe_size = ALIGN(sizeof(*data_klm), 64);
	} else {
		/**
		 * Source domain contains signature information
		 * So need construct a strided block format:
		 *               ---------------------------
		 *              |     stride_block_ctrl     |
		 *               ---------------------------
		 *              |          data_klm         |
		 *               ---------------------------
		 *              |          prot_klm         |
		 *               ---------------------------
		 *              |             BSF           |
		 *               ---------------------------
		 **/
		struct mlx5_stride_block_ctrl_seg *sblock_ctrl;
		struct mlx5_stride_block_entry *data_sentry;
		struct mlx5_stride_block_entry *prot_sentry;
		u32 prot_key = wr->prot->lkey;
		u64 prot_va = wr->prot->addr;
		u16 block_size = sig_attrs->mem.sig.dif.pi_interval;
		int prot_size;

		sblock_ctrl = *seg;
		data_sentry = (void *)sblock_ctrl + sizeof(*sblock_ctrl);
		prot_sentry = (void *)data_sentry + sizeof(*data_sentry);

		prot_size = prot_field_size(sig_attrs->mem.sig_type);
		if (!prot_size) {
			pr_err("Bad block size given: %u\n", block_size);
			return -EINVAL;
		}
		sblock_ctrl->bcount_per_cycle = cpu_to_be32(block_size +
							    prot_size);
		sblock_ctrl->op = cpu_to_be32(MLX5_STRIDE_BLOCK_OP);
		sblock_ctrl->repeat_count = cpu_to_be32(data_len / block_size);
		sblock_ctrl->num_entries = cpu_to_be16(2);

		data_sentry->bcount = cpu_to_be16(block_size);
		data_sentry->key = cpu_to_be32(data_key);
		data_sentry->va = cpu_to_be64(data_va);
		data_sentry->stride = cpu_to_be16(block_size);

		prot_sentry->bcount = cpu_to_be16(prot_size);
		prot_sentry->key = cpu_to_be32(prot_key);
		prot_sentry->va = cpu_to_be64(prot_va);
		prot_sentry->stride = cpu_to_be16(prot_size);

		wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) +
				 sizeof(*prot_sentry), 64);
	}

	*seg += wqe_size;
	*size += wqe_size / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	bsf = *seg;
	ret = mlx5_set_bsf(sig_mr, sig_attrs, bsf, data_len);
	if (ret)
		return -EINVAL;

	*seg += sizeof(*bsf);
	*size += sizeof(*bsf) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	return 0;
}
static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
				 struct ib_sig_handover_wr *wr, u32 size,
				 u32 length, u32 pdn)
{
	struct ib_mr *sig_mr = wr->sig_mr;
	u32 sig_key = sig_mr->rkey;
	u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1;

	memset(seg, 0, sizeof(*seg));

	seg->flags = get_umr_flags(wr->access_flags) |
				   MLX5_MKC_ACCESS_MODE_KLMS;
	seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
	seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
				    MLX5_MKEY_BSF_EN | pdn);
	seg->len = cpu_to_be64(length);
	seg->xlt_oct_size = cpu_to_be32(get_xlt_octo(size));
	seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
}

static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
				u32 size)
{
	memset(umr, 0, sizeof(*umr));

	umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE;
	umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
	umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE);
	umr->mkey_mask = sig_mkey_mask();
}
static int set_sig_umr_wr(struct ib_send_wr *send_wr, struct mlx5_ib_qp *qp,
			  void **seg, int *size)
{
	struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr);
	struct mlx5_ib_mr *sig_mr = to_mmr(wr->sig_mr);
	u32 pdn = get_pd(qp)->pdn;
	u32 xlt_size;
	int region_len, ret;

	if (unlikely(wr->wr.num_sge != 1) ||
	    unlikely(wr->access_flags & IB_ACCESS_REMOTE_ATOMIC) ||
	    unlikely(!sig_mr->sig) || unlikely(!qp->signature_en) ||
	    unlikely(!sig_mr->sig->sig_status_checked))
		return -EINVAL;

	/* length of the protected region, data + protection */
	region_len = wr->wr.sg_list->length;
	if (wr->prot &&
	    (wr->prot->lkey != wr->wr.sg_list->lkey  ||
	     wr->prot->addr != wr->wr.sg_list->addr  ||
	     wr->prot->length != wr->wr.sg_list->length))
		region_len += wr->prot->length;

	/**
	 * KLM octoword size - if protection was provided
	 * then we use strided block format (3 octowords),
	 * else we use single KLM (1 octoword)
	 **/
	xlt_size = wr->prot ? 0x30 : sizeof(struct mlx5_klm);

	set_sig_umr_segment(*seg, xlt_size);
	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	set_sig_mkey_segment(*seg, wr, xlt_size, region_len, pdn);
	*seg += sizeof(struct mlx5_mkey_seg);
	*size += sizeof(struct mlx5_mkey_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	ret = set_sig_data_segment(wr, qp, seg, size);
	if (ret)
		return ret;

	sig_mr->sig->sig_status_checked = false;

	return 0;
}
static int set_psv_wr(struct ib_sig_domain *domain,
		      u32 psv_idx, void **seg, int *size)
{
	struct mlx5_seg_set_psv *psv_seg = *seg;

	memset(psv_seg, 0, sizeof(*psv_seg));
	psv_seg->psv_num = cpu_to_be32(psv_idx);
	switch (domain->sig_type) {
	case IB_SIG_TYPE_NONE:
		break;
	case IB_SIG_TYPE_T10_DIF:
		psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 |
						     domain->sig.dif.app_tag);
		psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag);
		break;
	default:
		pr_err("Bad signature type (%d) is given.\n",
		       domain->sig_type);
		return -EINVAL;
	}

	*seg += sizeof(*psv_seg);
	*size += sizeof(*psv_seg) / 16;

	return 0;
}
static int set_reg_wr(struct mlx5_ib_qp *qp,
		      struct ib_reg_wr *wr,
		      void **seg, int *size)
{
	struct mlx5_ib_mr *mr = to_mmr(wr->mr);
	struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);

	if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
		mlx5_ib_warn(to_mdev(qp->ibqp.device),
			     "Invalid IB_SEND_INLINE send flag\n");
		return -EINVAL;
	}

	set_reg_umr_seg(*seg, mr);
	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	set_reg_mkey_seg(*seg, mr, wr->key, wr->access);
	*seg += sizeof(struct mlx5_mkey_seg);
	*size += sizeof(struct mlx5_mkey_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	set_reg_data_seg(*seg, mr, pd);
	*seg += sizeof(struct mlx5_wqe_data_seg);
	*size += (sizeof(struct mlx5_wqe_data_seg) / 16);

	return 0;
}
static void set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size)
{
	set_linv_umr_seg(*seg);
	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);
	set_linv_mkey_seg(*seg);
	*seg += sizeof(struct mlx5_mkey_seg);
	*size += sizeof(struct mlx5_mkey_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);
}
static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
{
	__be32 *p = NULL;
	int tidx = idx;
	int i, j;

	pr_debug("dump wqe at %p\n", mlx5_get_send_wqe(qp, tidx));
	for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
		if ((i & 0xf) == 0) {
			void *buf = mlx5_get_send_wqe(qp, tidx);
			tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1);
			p = buf;
			j = 0;
		}
		pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
			 be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
			 be32_to_cpu(p[j + 3]));
	}
}
static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
		     struct mlx5_wqe_ctrl_seg **ctrl,
		     struct ib_send_wr *wr, unsigned *idx,
		     int *size, int nreq)
{
	if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))
		return -ENOMEM;

	*idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
	*seg = mlx5_get_send_wqe(qp, *idx);
	*ctrl = *seg;
	*(uint32_t *)(*seg + 8) = 0;
	(*ctrl)->imm = send_ieth(wr);
	(*ctrl)->fm_ce_se = qp->sq_signal_bits |
		(wr->send_flags & IB_SEND_SIGNALED ?
		 MLX5_WQE_CTRL_CQ_UPDATE : 0) |
		(wr->send_flags & IB_SEND_SOLICITED ?
		 MLX5_WQE_CTRL_SOLICITED : 0);

	*seg += sizeof(**ctrl);
	*size = sizeof(**ctrl) / 16;

	return 0;
}

static void finish_wqe(struct mlx5_ib_qp *qp,
		       struct mlx5_wqe_ctrl_seg *ctrl,
		       u8 size, unsigned idx, u64 wr_id,
		       int nreq, u8 fence, u32 mlx5_opcode)
{
	u8 opmod = 0;

	ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
					     mlx5_opcode | ((u32)opmod << 24));
	ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8));
	ctrl->fm_ce_se |= fence;
	if (unlikely(qp->wq_sig))
		ctrl->signature = wq_sig(ctrl);

	qp->sq.wrid[idx] = wr_id;
	qp->sq.w_list[idx].opcode = mlx5_opcode;
	qp->sq.wqe_head[idx] = qp->sq.head + nreq;
	qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
	qp->sq.w_list[idx].next = qp->sq.cur_post;
}
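
/*
 * Note (added): cur_post advances in basic blocks (MLX5_SEND_WQE_BB bytes)
 * rather than in WQEs; e.g. a WQE of size 4 (4 * 16 = 64 bytes) consumes
 * one basic block.  The w_list/wqe_head bookkeeping lets completion
 * processing map a CQE back to the posted WQE index and its wr_id.
 */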
int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr)
{
	struct mlx5_wqe_ctrl_seg *ctrl = NULL;  /* compiler warning */
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_qp *qp;
	struct mlx5_ib_mr *mr;
	struct mlx5_wqe_data_seg *dpseg;
	struct mlx5_wqe_xrc_seg *xrc;
	struct mlx5_bf *bf;
	int uninitialized_var(size);
	void *qend;
	unsigned long flags;
	unsigned idx;
	int err = 0;
	int num_sge;
	void *seg;
	int nreq;
	int i;
	u8 next_fence = 0;
	u8 fence;

	if (unlikely(ibqp->qp_type == IB_QPT_GSI))
		return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr);

	qp = to_mqp(ibqp);
	bf = &qp->bf;
	qend = qp->sq.qend;

	spin_lock_irqsave(&qp->sq.lock, flags);

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		err = -EIO;
		*bad_wr = wr;
		nreq = 0;
		goto out;
	}

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
			mlx5_ib_warn(dev, "\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		num_sge = wr->num_sge;
		if (unlikely(num_sge > qp->sq.max_gs)) {
			mlx5_ib_warn(dev, "\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq);
		if (err) {
			mlx5_ib_warn(dev, "\n");
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (wr->opcode == IB_WR_LOCAL_INV ||
		    wr->opcode == IB_WR_REG_MR) {
			fence = dev->umr_fence;
			next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
		} else if (wr->send_flags & IB_SEND_FENCE) {
			if (qp->next_fence)
				fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
			else
				fence = MLX5_FENCE_MODE_FENCE;
		} else {
			fence = qp->next_fence;
		}

		switch (ibqp->qp_type) {
		case IB_QPT_XRC_INI:
			xrc = seg;
			seg += sizeof(*xrc);
			size += sizeof(*xrc) / 16;
			/* fall through */
		case IB_QPT_RC:
			switch (wr->opcode) {
			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(seg, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				seg += sizeof(struct mlx5_wqe_raddr_seg);
				size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
				break;

			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
			case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
				mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
				err = -ENOSYS;
				*bad_wr = wr;
				goto out;

			case IB_WR_LOCAL_INV:
				qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
				ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
				set_linv_wr(qp, &seg, &size);
				num_sge = 0;
				break;

			case IB_WR_REG_MR:
				qp->sq.wr_data[idx] = IB_WR_REG_MR;
				ctrl->imm = cpu_to_be32(reg_wr(wr)->key);
				err = set_reg_wr(qp, reg_wr(wr), &seg, &size);
				if (err) {
					*bad_wr = wr;
					goto out;
				}
				num_sge = 0;
				break;

			case IB_WR_REG_SIG_MR:
				qp->sq.wr_data[idx] = IB_WR_REG_SIG_MR;
				mr = to_mmr(sig_handover_wr(wr)->sig_mr);

				ctrl->imm = cpu_to_be32(mr->ibmr.rkey);
				err = set_sig_umr_wr(wr, qp, &seg, &size);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					*bad_wr = wr;
					goto out;
				}

				finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
					   fence, MLX5_OPCODE_UMR);
				/*
				 * SET_PSV WQEs are not signaled and solicited
				 * on error
				 */
				wr->send_flags &= ~IB_SEND_SIGNALED;
				wr->send_flags |= IB_SEND_SOLICITED;
				err = begin_wqe(qp, &seg, &ctrl, wr,
						&idx, &size, nreq);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					err = -ENOMEM;
					*bad_wr = wr;
					goto out;
				}

				err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->mem,
						 mr->sig->psv_memory.psv_idx, &seg,
						 &size);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					*bad_wr = wr;
					goto out;
				}

				finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
					   fence, MLX5_OPCODE_SET_PSV);
				err = begin_wqe(qp, &seg, &ctrl, wr,
						&idx, &size, nreq);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					err = -ENOMEM;
					*bad_wr = wr;
					goto out;
				}

				err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->wire,
						 mr->sig->psv_wire.psv_idx, &seg,
						 &size);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					*bad_wr = wr;
					goto out;
				}

				finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
					   fence, MLX5_OPCODE_SET_PSV);
				qp->next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
				num_sge = 0;
				goto skip_psv;

			default:
				break;
			}
			break;

		case IB_QPT_UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(seg, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				seg += sizeof(struct mlx5_wqe_raddr_seg);
				size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
				break;

			default:
				break;
			}
			break;

		case IB_QPT_SMI:
			if (unlikely(!mdev->port_caps[qp->port - 1].has_smi)) {
				mlx5_ib_warn(dev, "Send SMP MADs is not allowed\n");
				err = -EPERM;
				*bad_wr = wr;
				goto out;
			}
			/* fall through */
		case MLX5_IB_QPT_HW_GSI:
			set_datagram_seg(seg, wr);
			seg += sizeof(struct mlx5_wqe_datagram_seg);
			size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
			if (unlikely((seg == qend)))
				seg = mlx5_get_send_wqe(qp, 0);
			break;
		case IB_QPT_UD:
			set_datagram_seg(seg, wr);
			seg += sizeof(struct mlx5_wqe_datagram_seg);
			size += sizeof(struct mlx5_wqe_datagram_seg) / 16;

			if (unlikely((seg == qend)))
				seg = mlx5_get_send_wqe(qp, 0);

			/* handle qp that supports ud offload */
			if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO) {
				struct mlx5_wqe_eth_pad *pad;

				pad = seg;
				memset(pad, 0, sizeof(struct mlx5_wqe_eth_pad));
				seg += sizeof(struct mlx5_wqe_eth_pad);
				size += sizeof(struct mlx5_wqe_eth_pad) / 16;

				seg = set_eth_seg(seg, wr, qend, qp, &size);

				if (unlikely((seg == qend)))
					seg = mlx5_get_send_wqe(qp, 0);
			}
			break;
		case MLX5_IB_QPT_REG_UMR:
			if (wr->opcode != MLX5_IB_WR_UMR) {
				err = -EINVAL;
				mlx5_ib_warn(dev, "bad opcode\n");
				goto out;
			}
			qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
			ctrl->imm = cpu_to_be32(umr_wr(wr)->mkey);
			set_reg_umr_segment(seg, wr, !!(MLX5_CAP_GEN(mdev, atomic)));
			seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
			size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
			if (unlikely((seg == qend)))
				seg = mlx5_get_send_wqe(qp, 0);
			set_reg_mkey_segment(seg, wr);
			seg += sizeof(struct mlx5_mkey_seg);
			size += sizeof(struct mlx5_mkey_seg) / 16;
			if (unlikely((seg == qend)))
				seg = mlx5_get_send_wqe(qp, 0);
			break;

		default:
			break;
		}

		if (wr->send_flags & IB_SEND_INLINE && num_sge) {
			int uninitialized_var(sz);

			err = set_data_inl_seg(qp, wr, seg, &sz);
			if (unlikely(err)) {
				mlx5_ib_warn(dev, "\n");
				*bad_wr = wr;
				goto out;
			}
			size += sz;
		} else {
			dpseg = seg;
			for (i = 0; i < num_sge; i++) {
				if (unlikely(dpseg == qend)) {
					seg = mlx5_get_send_wqe(qp, 0);
					dpseg = seg;
				}
				if (likely(wr->sg_list[i].length)) {
					set_data_ptr_seg(dpseg, wr->sg_list + i);
					size += sizeof(struct mlx5_wqe_data_seg) / 16;
					dpseg++;
				}
			}
		}

		qp->next_fence = next_fence;
		finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, fence,
			   mlx5_ib_opcode[wr->opcode]);
skip_psv:
		if (0)
			dump_wqe(qp, idx, size);
	}

out:
	if (likely(nreq)) {
		qp->sq.head += nreq;

		/* Make sure that descriptors are written before
		 * updating doorbell record and ringing the doorbell
		 */
		wmb();

		qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);

		/* Make sure doorbell record is visible to the HCA before
		 * we hit doorbell */
		wmb();

		/* currently we support only regular doorbells */
		mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset, NULL);
		/* Make sure doorbells don't leak out of SQ spinlock
		 * and reach the HCA out of order.
		 */
		mmiowb();
		bf->offset ^= bf->buf_size;
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return err;
}
static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)
{
	sig->signature = calc_sig(sig, size);
}

int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_wqe_data_seg *scat;
	struct mlx5_rwqe_sig *sig;
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_core_dev *mdev = dev->mdev;
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int i;

	if (unlikely(ibqp->qp_type == IB_QPT_GSI))
		return mlx5_ib_gsi_post_recv(ibqp, wr, bad_wr);

	spin_lock_irqsave(&qp->rq.lock, flags);

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		err = -EIO;
		*bad_wr = wr;
		nreq = 0;
		goto out;
	}

	ind = qp->rq.head & (qp->rq.wqe_cnt - 1);

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		scat = get_recv_wqe(qp, ind);
		if (qp->wq_sig)
			scat++;

		for (i = 0; i < wr->num_sge; i++)
			set_data_ptr_seg(scat + i, wr->sg_list + i);

		if (i < qp->rq.max_gs) {
			scat[i].byte_count = 0;
			scat[i].lkey       = cpu_to_be32(MLX5_INVALID_LKEY);
			scat[i].addr       = 0;
		}

		if (qp->wq_sig) {
			sig = (struct mlx5_rwqe_sig *)scat;
			set_sig_seg(sig, (qp->rq.max_gs + 1) << 2);
		}

		qp->rq.wrid[ind] = wr->wr_id;

		ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
	}

out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/* Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);

	return err;
}
static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
{
	switch (mlx5_state) {
	case MLX5_QP_STATE_RST:      return IB_QPS_RESET;
	case MLX5_QP_STATE_INIT:     return IB_QPS_INIT;
	case MLX5_QP_STATE_RTR:      return IB_QPS_RTR;
	case MLX5_QP_STATE_RTS:      return IB_QPS_RTS;
	case MLX5_QP_STATE_SQ_DRAINING:
	case MLX5_QP_STATE_SQD:      return IB_QPS_SQD;
	case MLX5_QP_STATE_SQER:     return IB_QPS_SQE;
	case MLX5_QP_STATE_ERR:      return IB_QPS_ERR;
	default:		     return -1;
	}
}

static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state)
{
	switch (mlx5_mig_state) {
	case MLX5_QP_PM_ARMED:	     return IB_MIG_ARMED;
	case MLX5_QP_PM_REARM:	     return IB_MIG_REARM;
	case MLX5_QP_PM_MIGRATED:    return IB_MIG_MIGRATED;
	default:		     return -1;
	}
}

static int to_ib_qp_access_flags(int mlx5_flags)
{
	int ib_flags = 0;

	if (mlx5_flags & MLX5_QP_BIT_RRE)
		ib_flags |= IB_ACCESS_REMOTE_READ;
	if (mlx5_flags & MLX5_QP_BIT_RWE)
		ib_flags |= IB_ACCESS_REMOTE_WRITE;
	if (mlx5_flags & MLX5_QP_BIT_RAE)
		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

	return ib_flags;
}
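
/*
 * Worked example (illustrative note): a params2 field carrying
 * MLX5_QP_BIT_RRE | MLX5_QP_BIT_RWE translates back to
 * IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE, the inverse of
 * to_mlx5_access_flags() earlier in this file.
 */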
static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
			    struct rdma_ah_attr *ah_attr,
			    struct mlx5_qp_path *path)
{
	struct mlx5_core_dev *dev = ibdev->mdev;

	memset(ah_attr, 0, sizeof(*ah_attr));

	ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, path->port);
	rdma_ah_set_port_num(ah_attr, path->port);
	if (rdma_ah_get_port_num(ah_attr) == 0 ||
	    rdma_ah_get_port_num(ah_attr) > MLX5_CAP_GEN(dev, num_ports))
		return;

	rdma_ah_set_port_num(ah_attr, path->port);
	rdma_ah_set_sl(ah_attr, path->dci_cfi_prio_sl & 0xf);

	rdma_ah_set_dlid(ah_attr, be16_to_cpu(path->rlid));
	rdma_ah_set_path_bits(ah_attr, path->grh_mlid & 0x7f);
	rdma_ah_set_static_rate(ah_attr,
				path->static_rate ? path->static_rate - 5 : 0);
	if (path->grh_mlid & (1 << 7)) {
		u32 tc_fl = be32_to_cpu(path->tclass_flowlabel);

		rdma_ah_set_grh(ah_attr, NULL,
				tc_fl & 0xfffff,
				path->mgid_index,
				path->hop_limit,
				(tc_fl >> 20) & 0xff);
		rdma_ah_set_dgid_raw(ah_attr, path->rgid);
	}
}
4390 static int query_raw_packet_qp_sq_state(struct mlx5_ib_dev
*dev
,
4391 struct mlx5_ib_sq
*sq
,
4399 inlen
= MLX5_ST_SZ_BYTES(query_sq_out
);
4400 out
= kvzalloc(inlen
, GFP_KERNEL
);
4404 err
= mlx5_core_query_sq(dev
->mdev
, sq
->base
.mqp
.qpn
, out
);
4408 sqc
= MLX5_ADDR_OF(query_sq_out
, out
, sq_context
);
4409 *sq_state
= MLX5_GET(sqc
, sqc
, state
);
4410 sq
->state
= *sq_state
;
static int query_raw_packet_qp_rq_state(struct mlx5_ib_dev *dev,
					struct mlx5_ib_rq *rq,
					u8 *rq_state)
{
	void *out;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(query_rq_out);
	out = kvzalloc(inlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_core_query_rq(dev->mdev, rq->base.mqp.qpn, out);
	if (err)
		goto out;

	rqc = MLX5_ADDR_OF(query_rq_out, out, rq_context);
	*rq_state = MLX5_GET(rqc, rqc, state);
	rq->state = *rq_state;

out:
	kvfree(out);
	return err;
}

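/* sqrq_trans[][] folds the RQ/SQ state pair into one value: a real IB QP
 * state where the combination is unambiguous, MLX5_QP_STATE when the
 * software-tracked qp->state should be reported instead, and
 * MLX5_QP_STATE_BAD for combinations that should never occur.
 */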
static int sqrq_state_to_qp_state(u8 sq_state, u8 rq_state,
				  struct mlx5_ib_qp *qp, u8 *qp_state)
{
	static const u8 sqrq_trans[MLX5_RQ_NUM_STATE][MLX5_SQ_NUM_STATE] = {
		[MLX5_RQC_STATE_RST] = {
			[MLX5_SQC_STATE_RST]	= IB_QPS_RESET,
			[MLX5_SQC_STATE_RDY]	= MLX5_QP_STATE_BAD,
			[MLX5_SQC_STATE_ERR]	= MLX5_QP_STATE_BAD,
			[MLX5_SQ_STATE_NA]	= IB_QPS_RESET,
		},
		[MLX5_RQC_STATE_RDY] = {
			[MLX5_SQC_STATE_RST]	= MLX5_QP_STATE_BAD,
			[MLX5_SQC_STATE_RDY]	= MLX5_QP_STATE,
			[MLX5_SQC_STATE_ERR]	= IB_QPS_SQE,
			[MLX5_SQ_STATE_NA]	= MLX5_QP_STATE,
		},
		[MLX5_RQC_STATE_ERR] = {
			[MLX5_SQC_STATE_RST]	= MLX5_QP_STATE_BAD,
			[MLX5_SQC_STATE_RDY]	= MLX5_QP_STATE_BAD,
			[MLX5_SQC_STATE_ERR]	= IB_QPS_ERR,
			[MLX5_SQ_STATE_NA]	= IB_QPS_ERR,
		},
		[MLX5_RQ_STATE_NA] = {
			[MLX5_SQC_STATE_RST]	= IB_QPS_RESET,
			[MLX5_SQC_STATE_RDY]	= MLX5_QP_STATE,
			[MLX5_SQC_STATE_ERR]	= MLX5_QP_STATE,
			[MLX5_SQ_STATE_NA]	= MLX5_QP_STATE_BAD,
		},
	};

	*qp_state = sqrq_trans[rq_state][sq_state];

	if (*qp_state == MLX5_QP_STATE_BAD) {
		WARN(1, "Buggy Raw Packet QP state, SQ 0x%x state: 0x%x, RQ 0x%x state: 0x%x",
		     qp->raw_packet_qp.sq.base.mqp.qpn, sq_state,
		     qp->raw_packet_qp.rq.base.mqp.qpn, rq_state);
		return -EINVAL;
	}

	if (*qp_state == MLX5_QP_STATE)
		*qp_state = qp->state;

	return 0;
}

static int query_raw_packet_qp_state(struct mlx5_ib_dev *dev,
				     struct mlx5_ib_qp *qp,
				     u8 *raw_packet_qp_state)
{
	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
	int err;
	u8 sq_state = MLX5_SQ_STATE_NA;
	u8 rq_state = MLX5_RQ_STATE_NA;

	if (qp->sq.wqe_cnt) {
		err = query_raw_packet_qp_sq_state(dev, sq, &sq_state);
		if (err)
			return err;
	}

	if (qp->rq.wqe_cnt) {
		err = query_raw_packet_qp_rq_state(dev, rq, &rq_state);
		if (err)
			return err;
	}

	return sqrq_state_to_qp_state(sq_state, rq_state, qp,
				      raw_packet_qp_state);
}

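/* Query a regular (non raw packet) QP context from firmware and
 * translate it into the corresponding ib_qp_attr fields.
 */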
static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
			 struct ib_qp_attr *qp_attr)
{
	int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
	struct mlx5_qp_context *context;
	int mlx5_state;
	u32 *outb;
	int err = 0;

	outb = kzalloc(outlen, GFP_KERNEL);
	if (!outb)
		return -ENOMEM;

	err = mlx5_core_qp_query(dev->mdev, &qp->trans_qp.base.mqp, outb,
				 outlen);
	if (err)
		goto out;

	/* FIXME: use MLX5_GET rather than mlx5_qp_context manual struct */
	context = (struct mlx5_qp_context *)MLX5_ADDR_OF(query_qp_out, outb, qpc);

	mlx5_state = be32_to_cpu(context->flags) >> 28;

	qp->state		     = to_ib_qp_state(mlx5_state);
	qp_attr->path_mtu	     = context->mtu_msgmax >> 5;
	qp_attr->path_mig_state	     =
		to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
	qp_attr->qkey		     = be32_to_cpu(context->qkey);
	qp_attr->rq_psn		     = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
	qp_attr->sq_psn		     = be32_to_cpu(context->next_send_psn) & 0xffffff;
	qp_attr->dest_qp_num	     = be32_to_cpu(context->log_pg_sz_remote_qpn) & 0xffffff;
	qp_attr->qp_access_flags     =
		to_ib_qp_access_flags(be32_to_cpu(context->params2));

	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
		to_rdma_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
		to_rdma_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
		qp_attr->alt_pkey_index =
			be16_to_cpu(context->alt_path.pkey_index);
		qp_attr->alt_port_num	=
			rdma_ah_get_port_num(&qp_attr->alt_ah_attr);
	}

	qp_attr->pkey_index = be16_to_cpu(context->pri_path.pkey_index);
	qp_attr->port_num = context->pri_path.port;

	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
	qp_attr->sq_draining = mlx5_state == MLX5_QP_STATE_SQ_DRAINING;

	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);

	qp_attr->max_dest_rd_atomic =
		1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
	qp_attr->min_rnr_timer	    =
		(be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
	qp_attr->timeout	    = context->pri_path.ackto_lt >> 3;
	qp_attr->retry_cnt	    = (be32_to_cpu(context->params1) >> 16) & 0x7;
	qp_attr->rnr_retry	    = (be32_to_cpu(context->params1) >> 13) & 0x7;
	qp_attr->alt_timeout	    = context->alt_path.ackto_lt >> 3;

out:
	kfree(outb);
	return err;
}

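/**
 * mlx5_ib_query_qp() - Query a QP's attributes and creation attributes.
 * @ibqp: QP to query.
 * @qp_attr: Filled with the current QP attributes.
 * @qp_attr_mask: Mask of the attributes the caller is interested in.
 * @qp_init_attr: Filled with the attributes the QP was created with.
 *
 * Raw packet and underlay QPs are queried through their SQ/RQ hardware
 * objects; everything else goes through the firmware QP context.
 * Callers normally reach this through the ib_query_qp() verb, for
 * example (illustrative only):
 *
 *	struct ib_qp_attr attr;
 *	struct ib_qp_init_attr init_attr;
 *
 *	if (!ib_query_qp(qp, &attr, IB_QP_STATE, &init_attr))
 *		pr_debug("qp state %d\n", attr.qp_state);
 */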
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
		     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	int err = 0;
	u8 raw_packet_qp_state;

	if (ibqp->rwq_ind_tbl)
		return -ENOSYS;

	if (unlikely(ibqp->qp_type == IB_QPT_GSI))
		return mlx5_ib_gsi_query_qp(ibqp, qp_attr, qp_attr_mask,
					    qp_init_attr);

	/* Not all of output fields are applicable, make sure to zero them */
	memset(qp_init_attr, 0, sizeof(*qp_init_attr));
	memset(qp_attr, 0, sizeof(*qp_attr));

	mutex_lock(&qp->mutex);

	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
	    qp->flags & MLX5_IB_QP_UNDERLAY) {
		err = query_raw_packet_qp_state(dev, qp, &raw_packet_qp_state);
		if (err)
			goto out;
		qp->state = raw_packet_qp_state;
		qp_attr->port_num = 1;
	} else {
		err = query_qp_attr(dev, qp, qp_attr);
		if (err)
			goto out;
	}

	qp_attr->qp_state	     = qp->state;
	qp_attr->cur_qp_state	     = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr     = qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;

	if (!ibqp->uobject) {
		qp_attr->cap.max_send_wr  = qp->sq.max_post;
		qp_attr->cap.max_send_sge = qp->sq.max_gs;
		qp_init_attr->qp_context = ibqp->qp_context;
	} else {
		qp_attr->cap.max_send_wr  = 0;
		qp_attr->cap.max_send_sge = 0;
	}

	qp_init_attr->qp_type = ibqp->qp_type;
	qp_init_attr->recv_cq = ibqp->recv_cq;
	qp_init_attr->send_cq = ibqp->send_cq;
	qp_init_attr->srq = ibqp->srq;
	qp_attr->cap.max_inline_data = qp->max_inline_data;

	qp_init_attr->cap	     = qp_attr->cap;

	qp_init_attr->create_flags = 0;
	if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
		qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;

	if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
		qp_init_attr->create_flags |= IB_QP_CREATE_CROSS_CHANNEL;
	if (qp->flags & MLX5_IB_QP_MANAGED_SEND)
		qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_SEND;
	if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
		qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_RECV;
	if (qp->flags & MLX5_IB_QP_SQPN_QP1)
		qp_init_attr->create_flags |= mlx5_ib_create_qp_sqpn_qp1();

	qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
		IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;

out:
	mutex_unlock(&qp->mutex);
	return err;
}

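/* XRC domains: allocation and teardown are thin wrappers around the
 * mlx5 core xrcdn allocator.
 */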
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_xrcd *xrcd;
	int err;

	if (!MLX5_CAP_GEN(dev->mdev, xrc))
		return ERR_PTR(-ENOSYS);

	xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
	if (!xrcd)
		return ERR_PTR(-ENOMEM);

	err = mlx5_core_xrcd_alloc(dev->mdev, &xrcd->xrcdn);
	if (err) {
		kfree(xrcd);
		return ERR_PTR(-ENOMEM);
	}

	return &xrcd->ibxrcd;
}

int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
	u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
	int err;

	err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn);
	if (err)
		mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);

	kfree(xrcd);
	return 0;
}

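/* Forward WQ hardware events (currently only catastrophic errors) to
 * the event handler the consumer registered at WQ creation time.
 */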
static void mlx5_ib_wq_event(struct mlx5_core_qp *core_qp, int type)
{
	struct mlx5_ib_rwq *rwq = to_mibrwq(core_qp);
	struct mlx5_ib_dev *dev = to_mdev(rwq->ibwq.device);
	struct ib_event event;

	if (rwq->ibwq.event_handler) {
		event.device     = rwq->ibwq.device;
		event.element.wq = &rwq->ibwq;
		switch (type) {
		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_WQ_FATAL;
			break;
		default:
			mlx5_ib_warn(dev, "Unexpected event type %d on WQ %06x\n", type, core_qp->qpn);
			return;
		}

		rwq->ibwq.event_handler(&event, rwq->ibwq.wq_context);
	}
}

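/* Arm the device-global delay-drop timeout on first use and count how
 * many RQs rely on it.  dev->delay_drop.lock serializes activation so
 * the firmware command is issued only once.
 */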
static int set_delay_drop(struct mlx5_ib_dev *dev)
{
	int err = 0;

	mutex_lock(&dev->delay_drop.lock);
	if (dev->delay_drop.activate)
		goto out;

	err = mlx5_core_set_delay_drop(dev->mdev, dev->delay_drop.timeout);
	if (err)
		goto out;

	dev->delay_drop.activate = true;
out:
	mutex_unlock(&dev->delay_drop.lock);

	if (!err)
		atomic_inc(&dev->delay_drop.rqs_cnt);

	return err;
}

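/* Build the CREATE_RQ command for a user WQ: fill the RQ context and its
 * embedded WQ context (including striding-RQ parameters and optional
 * offloads) and hand the user buffer's page list over to firmware.
 */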
static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
		     struct ib_wq_init_attr *init_attr)
{
	struct mlx5_ib_dev *dev;
	int has_net_offloads;
	__be64 *rq_pas0;
	void *in;
	void *rqc;
	void *wq;
	int inlen;
	int err;

	dev = to_mdev(pd->device);

	inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rwq->rq_num_pas;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	MLX5_SET(rqc, rqc, mem_rq_type,
		 MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
	MLX5_SET(rqc, rqc, user_index, rwq->user_index);
	MLX5_SET(rqc, rqc, cqn, to_mcq(init_attr->cq)->mcq.cqn);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
	MLX5_SET(rqc, rqc, flush_in_error_en, 1);
	wq = MLX5_ADDR_OF(rqc, rqc, wq);
	MLX5_SET(wq, wq, wq_type,
		 rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ ?
		 MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ : MLX5_WQ_TYPE_CYCLIC);
	if (init_attr->create_flags & IB_WQ_FLAGS_PCI_WRITE_END_PADDING) {
		if (!MLX5_CAP_GEN(dev->mdev, end_pad)) {
			mlx5_ib_dbg(dev, "Scatter end padding is not supported\n");
			err = -EOPNOTSUPP;
			goto out;
		} else {
			MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
		}
	}
	MLX5_SET(wq, wq, log_wq_stride, rwq->log_rq_stride);
	if (rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ) {
		MLX5_SET(wq, wq, two_byte_shift_en, rwq->two_byte_shift_en);
		MLX5_SET(wq, wq, log_wqe_stride_size,
			 rwq->single_stride_log_num_of_bytes -
			 MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES);
		MLX5_SET(wq, wq, log_wqe_num_of_strides, rwq->log_num_strides -
			 MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES);
	}
	MLX5_SET(wq, wq, log_wq_sz, rwq->log_rq_size);
	MLX5_SET(wq, wq, pd, to_mpd(pd)->pdn);
	MLX5_SET(wq, wq, page_offset, rwq->rq_page_offset);
	MLX5_SET(wq, wq, log_wq_pg_sz, rwq->log_page_size);
	MLX5_SET(wq, wq, wq_signature, rwq->wq_sig);
	MLX5_SET64(wq, wq, dbr_addr, rwq->db.dma);
	has_net_offloads = MLX5_CAP_GEN(dev->mdev, eth_net_offloads);
	if (init_attr->create_flags & IB_WQ_FLAGS_CVLAN_STRIPPING) {
		if (!(has_net_offloads && MLX5_CAP_ETH(dev->mdev, vlan_cap))) {
			mlx5_ib_dbg(dev, "VLAN offloads are not supported\n");
			err = -EOPNOTSUPP;
			goto out;
		}
	} else {
		MLX5_SET(rqc, rqc, vsd, 1);
	}
	if (init_attr->create_flags & IB_WQ_FLAGS_SCATTER_FCS) {
		if (!(has_net_offloads && MLX5_CAP_ETH(dev->mdev, scatter_fcs))) {
			mlx5_ib_dbg(dev, "Scatter FCS is not supported\n");
			err = -EOPNOTSUPP;
			goto out;
		}
		MLX5_SET(rqc, rqc, scatter_fcs, 1);
	}
	if (init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) {
		if (!(dev->ib_dev.attrs.raw_packet_caps &
		      IB_RAW_PACKET_CAP_DELAY_DROP)) {
			mlx5_ib_dbg(dev, "Delay drop is not supported\n");
			err = -EOPNOTSUPP;
			goto out;
		}
		MLX5_SET(rqc, rqc, delay_drop_en, 1);
	}
	rq_pas0 = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
	mlx5_ib_populate_pas(dev, rwq->umem, rwq->page_shift, rq_pas0, 0);
	err = mlx5_core_create_rq_tracked(dev->mdev, in, inlen, &rwq->core_qp);
	if (!err && init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) {
		err = set_delay_drop(dev);
		if (err) {
			mlx5_ib_warn(dev, "Failed to enable delay drop err=%d\n",
				     err);
			mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
		} else {
			rwq->create_flags |= MLX5_IB_WQ_FLAGS_DELAY_DROP;
		}
	}
out:
	kvfree(in);
	return err;
}

static int set_user_rq_size(struct mlx5_ib_dev *dev,
			    struct ib_wq_init_attr *wq_init_attr,
			    struct mlx5_ib_create_wq *ucmd,
			    struct mlx5_ib_rwq *rwq)
{
	/* Sanity check RQ size before proceeding */
	if (wq_init_attr->max_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_wq_sz)))
		return -EINVAL;

	if (!ucmd->rq_wqe_count)
		return -EINVAL;

	rwq->wqe_count = ucmd->rq_wqe_count;
	rwq->wqe_shift = ucmd->rq_wqe_shift;
	rwq->buf_size = (rwq->wqe_count << rwq->wqe_shift);
	rwq->log_rq_stride = rwq->wqe_shift;
	rwq->log_rq_size = ilog2(rwq->wqe_count);
	return 0;
}

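/* Validate the user's create_wq command, including the optional
 * striding-RQ parameters, then size the RQ and pin the user buffer.
 */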
static int prepare_user_rq(struct ib_pd *pd,
			   struct ib_wq_init_attr *init_attr,
			   struct ib_udata *udata,
			   struct mlx5_ib_rwq *rwq)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_create_wq ucmd = {};
	int err;
	size_t required_cmd_sz;

	required_cmd_sz = offsetof(typeof(ucmd), single_stride_log_num_of_bytes)
		+ sizeof(ucmd.single_stride_log_num_of_bytes);
	if (udata->inlen < required_cmd_sz) {
		mlx5_ib_dbg(dev, "invalid inlen\n");
		return -EINVAL;
	}

	if (udata->inlen > sizeof(ucmd) &&
	    !ib_is_udata_cleared(udata, sizeof(ucmd),
				 udata->inlen - sizeof(ucmd))) {
		mlx5_ib_dbg(dev, "inlen is not supported\n");
		return -EOPNOTSUPP;
	}

	if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
		mlx5_ib_dbg(dev, "copy failed\n");
		return -EFAULT;
	}

	if (ucmd.comp_mask & (~MLX5_IB_CREATE_WQ_STRIDING_RQ)) {
		mlx5_ib_dbg(dev, "invalid comp mask\n");
		return -EOPNOTSUPP;
	} else if (ucmd.comp_mask & MLX5_IB_CREATE_WQ_STRIDING_RQ) {
		if (!MLX5_CAP_GEN(dev->mdev, striding_rq)) {
			mlx5_ib_dbg(dev, "Striding RQ is not supported\n");
			return -EOPNOTSUPP;
		}
		if ((ucmd.single_stride_log_num_of_bytes <
		     MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES) ||
		    (ucmd.single_stride_log_num_of_bytes >
		     MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES)) {
			mlx5_ib_dbg(dev, "Invalid log stride size (%u. Range is %u - %u)\n",
				    ucmd.single_stride_log_num_of_bytes,
				    MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES,
				    MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES);
			return -EINVAL;
		}
		if ((ucmd.single_wqe_log_num_of_strides >
		     MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES) ||
		    (ucmd.single_wqe_log_num_of_strides <
		     MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES)) {
			mlx5_ib_dbg(dev, "Invalid log num strides (%u. Range is %u - %u)\n",
				    ucmd.single_wqe_log_num_of_strides,
				    MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES,
				    MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES);
			return -EINVAL;
		}
		rwq->single_stride_log_num_of_bytes =
			ucmd.single_stride_log_num_of_bytes;
		rwq->log_num_strides = ucmd.single_wqe_log_num_of_strides;
		rwq->two_byte_shift_en = !!ucmd.two_byte_shift_en;
		rwq->create_flags |= MLX5_IB_WQ_FLAGS_STRIDING_RQ;
	}

	err = set_user_rq_size(dev, init_attr, &ucmd, rwq);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		return err;
	}

	err = create_user_rq(dev, pd, rwq, &ucmd);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		return err;
	}

	rwq->user_index = ucmd.user_index;
	return 0;
}

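/**
 * mlx5_ib_create_wq() - Create a work queue (currently only IB_WQT_RQ).
 * @pd: Protection domain the WQ belongs to.
 * @init_attr: WQ type, size and creation flags.
 * @udata: User command/response buffers; a WQ can only be created on
 *	   behalf of a user context.
 */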
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev;
	struct mlx5_ib_rwq *rwq;
	struct mlx5_ib_create_wq_resp resp = {};
	size_t min_resp_len;
	int err;

	if (!udata)
		return ERR_PTR(-ENOSYS);

	min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
	if (udata->outlen && udata->outlen < min_resp_len)
		return ERR_PTR(-EINVAL);

	dev = to_mdev(pd->device);
	switch (init_attr->wq_type) {
	case IB_WQT_RQ:
		rwq = kzalloc(sizeof(*rwq), GFP_KERNEL);
		if (!rwq)
			return ERR_PTR(-ENOMEM);
		err = prepare_user_rq(pd, init_attr, udata, rwq);
		if (err)
			goto err;
		err = create_rq(rwq, pd, init_attr);
		if (err)
			goto err_user_rq;
		break;
	default:
		mlx5_ib_dbg(dev, "unsupported wq type %d\n",
			    init_attr->wq_type);
		return ERR_PTR(-EINVAL);
	}

	rwq->ibwq.wq_num = rwq->core_qp.qpn;
	rwq->ibwq.state = IB_WQS_RESET;
	if (udata->outlen) {
		resp.response_length = offsetof(typeof(resp), response_length) +
				sizeof(resp.response_length);
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
		if (err)
			goto err_copy;
	}

	rwq->core_qp.event = mlx5_ib_wq_event;
	rwq->ibwq.event_handler = init_attr->event_handler;
	return &rwq->ibwq;

err_copy:
	mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
err_user_rq:
	destroy_user_rq(dev, pd, rwq);
err:
	kfree(rwq);
	return ERR_PTR(err);
}

int mlx5_ib_destroy_wq(struct ib_wq *wq)
{
	struct mlx5_ib_dev *dev = to_mdev(wq->device);
	struct mlx5_ib_rwq *rwq = to_mrwq(wq);

	mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
	destroy_user_rq(dev, wq->pd, rwq);
	kfree(rwq);

	return 0;
}

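/* A receive WQ indirection table (RQT) lists the WQs that RSS can
 * spread incoming traffic across; the table is created in firmware and
 * referenced by its rqtn.
 */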
struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
						      struct ib_rwq_ind_table_init_attr *init_attr,
						      struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	struct mlx5_ib_rwq_ind_table *rwq_ind_tbl;
	int sz = 1 << init_attr->log_ind_tbl_size;
	struct mlx5_ib_create_rwq_ind_tbl_resp resp = {};
	size_t min_resp_len;
	void *rqtc;
	int inlen;
	int err;
	int i;
	u32 *in;

	if (udata->inlen > 0 &&
	    !ib_is_udata_cleared(udata, 0,
				 udata->inlen))
		return ERR_PTR(-EOPNOTSUPP);

	if (init_attr->log_ind_tbl_size >
	    MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)) {
		mlx5_ib_dbg(dev, "log_ind_tbl_size = %d is bigger than supported = %d\n",
			    init_attr->log_ind_tbl_size,
			    MLX5_CAP_GEN(dev->mdev, log_max_rqt_size));
		return ERR_PTR(-EINVAL);
	}

	min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
	if (udata->outlen && udata->outlen < min_resp_len)
		return ERR_PTR(-EINVAL);

	rwq_ind_tbl = kzalloc(sizeof(*rwq_ind_tbl), GFP_KERNEL);
	if (!rwq_ind_tbl)
		return ERR_PTR(-ENOMEM);

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err;
	}

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	for (i = 0; i < sz; i++)
		MLX5_SET(rqtc, rqtc, rq_num[i], init_attr->ind_tbl[i]->wq_num);

	err = mlx5_core_create_rqt(dev->mdev, in, inlen, &rwq_ind_tbl->rqtn);
	kvfree(in);

	if (err)
		goto err;

	rwq_ind_tbl->ib_rwq_ind_tbl.ind_tbl_num = rwq_ind_tbl->rqtn;
	if (udata->outlen) {
		resp.response_length = offsetof(typeof(resp), response_length) +
					sizeof(resp.response_length);
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
		if (err)
			goto err_copy;
	}

	return &rwq_ind_tbl->ib_rwq_ind_tbl;

err_copy:
	mlx5_core_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn);
err:
	kfree(rwq_ind_tbl);
	return ERR_PTR(err);
}

int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
	struct mlx5_ib_rwq_ind_table *rwq_ind_tbl = to_mrwq_ind_table(ib_rwq_ind_tbl);
	struct mlx5_ib_dev *dev = to_mdev(ib_rwq_ind_tbl->device);

	mlx5_core_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn);

	kfree(rwq_ind_tbl);
	return 0;
}

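/**
 * mlx5_ib_modify_wq() - Modify a WQ's state and/or flags.
 * @wq: WQ to modify.
 * @wq_attr: Requested state and flag changes.
 * @wq_attr_mask: Which attributes in @wq_attr are valid.
 * @udata: User command buffer; must not carry unknown fields.
 */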
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(wq->device);
	struct mlx5_ib_rwq *rwq = to_mrwq(wq);
	struct mlx5_ib_modify_wq ucmd = {};
	size_t required_cmd_sz;
	int curr_wq_state;
	int wq_state;
	int inlen;
	int err;
	void *rqc;
	void *in;

	required_cmd_sz = offsetof(typeof(ucmd), reserved) + sizeof(ucmd.reserved);
	if (udata->inlen < required_cmd_sz)
		return -EINVAL;

	if (udata->inlen > sizeof(ucmd) &&
	    !ib_is_udata_cleared(udata, sizeof(ucmd),
				 udata->inlen - sizeof(ucmd)))
		return -EOPNOTSUPP;

	if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)))
		return -EFAULT;

	if (ucmd.comp_mask || ucmd.reserved)
		return -EOPNOTSUPP;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	curr_wq_state = (wq_attr_mask & IB_WQ_CUR_STATE) ?
		wq_attr->curr_wq_state : wq->state;
	wq_state = (wq_attr_mask & IB_WQ_STATE) ?
		wq_attr->wq_state : curr_wq_state;
	if (curr_wq_state == IB_WQS_ERR)
		curr_wq_state = MLX5_RQC_STATE_ERR;
	if (wq_state == IB_WQS_ERR)
		wq_state = MLX5_RQC_STATE_ERR;
	MLX5_SET(modify_rq_in, in, rq_state, curr_wq_state);
	MLX5_SET(rqc, rqc, state, wq_state);

	if (wq_attr_mask & IB_WQ_FLAGS) {
		if (wq_attr->flags_mask & IB_WQ_FLAGS_CVLAN_STRIPPING) {
			if (!(MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
			      MLX5_CAP_ETH(dev->mdev, vlan_cap))) {
				mlx5_ib_dbg(dev, "VLAN offloads are not supported\n");
				err = -EOPNOTSUPP;
				goto out;
			}
			MLX5_SET64(modify_rq_in, in, modify_bitmask,
				   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
			MLX5_SET(rqc, rqc, vsd,
				 (wq_attr->flags & IB_WQ_FLAGS_CVLAN_STRIPPING) ? 0 : 1);
		}

		if (wq_attr->flags_mask & IB_WQ_FLAGS_PCI_WRITE_END_PADDING) {
			mlx5_ib_dbg(dev, "Modifying scatter end padding is not supported\n");
			err = -EOPNOTSUPP;
			goto out;
		}
	}

	if (curr_wq_state == IB_WQS_RESET && wq_state == IB_WQS_RDY) {
		if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) {
			MLX5_SET64(modify_rq_in, in, modify_bitmask,
				   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
			MLX5_SET(rqc, rqc, counter_set_id,
				 dev->port->cnts.set_id);
		} else {
			pr_info_once("%s: Receive WQ counters are not supported on current FW\n",
				     dev->ib_dev.name);
		}
	}

	err = mlx5_core_modify_rq(dev->mdev, rwq->core_qp.qpn, in, inlen);
	if (!err)
		rwq->ibwq.state = (wq_state == MLX5_RQC_STATE_ERR) ? IB_WQS_ERR : wq_state;