/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_user_verbs.h>
#include <linux/mlx5/fs.h>
#include "mlx5_ib.h"

/* not supported currently */
static int wq_signature;
enum {
	MLX5_IB_ACK_REQ_FREQ	= 8,
};

enum {
	MLX5_IB_DEFAULT_SCHED_QUEUE	= 0x83,
	MLX5_IB_DEFAULT_QP0_SCHED_QUEUE	= 0x3f,
	MLX5_IB_LINK_TYPE_IB		= 0,
	MLX5_IB_LINK_TYPE_ETH		= 1
};

enum {
	MLX5_IB_SQ_STRIDE	= 6,
};
static const u32 mlx5_ib_opcode[] = {
	[IB_WR_SEND]				= MLX5_OPCODE_SEND,
	[IB_WR_LSO]				= MLX5_OPCODE_LSO,
	[IB_WR_SEND_WITH_IMM]			= MLX5_OPCODE_SEND_IMM,
	[IB_WR_RDMA_WRITE]			= MLX5_OPCODE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM]		= MLX5_OPCODE_RDMA_WRITE_IMM,
	[IB_WR_RDMA_READ]			= MLX5_OPCODE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP]		= MLX5_OPCODE_ATOMIC_CS,
	[IB_WR_ATOMIC_FETCH_AND_ADD]		= MLX5_OPCODE_ATOMIC_FA,
	[IB_WR_SEND_WITH_INV]			= MLX5_OPCODE_SEND_INVAL,
	[IB_WR_LOCAL_INV]			= MLX5_OPCODE_UMR,
	[IB_WR_REG_MR]				= MLX5_OPCODE_UMR,
	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP]	= MLX5_OPCODE_ATOMIC_MASKED_CS,
	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]	= MLX5_OPCODE_ATOMIC_MASKED_FA,
	[MLX5_IB_WR_UMR]			= MLX5_OPCODE_UMR,
};
struct mlx5_wqe_eth_pad {
	u8 rsvd0[16];
};

enum raw_qp_set_mask_map {
	MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID	= 1UL << 0,
	MLX5_RAW_QP_RATE_LIMIT		= 1UL << 1,
};

struct mlx5_modify_raw_qp_param {
	u16 operation;

	u32 set_mask; /* raw_qp_set_mask_map */
	u32 rate_limit;
	u8 rq_q_ctr_id;
};
static void get_cqs(enum ib_qp_type qp_type,
		    struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
		    struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq);

static int is_qp0(enum ib_qp_type qp_type)
{
	return qp_type == IB_QPT_SMI;
}

static int is_sqp(enum ib_qp_type qp_type)
{
	return is_qp0(qp_type) || is_qp1(qp_type);
}
static void *get_wqe(struct mlx5_ib_qp *qp, int offset)
{
	return mlx5_buf_offset(&qp->buf, offset);
}

static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
}

void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));
}
/**
 * mlx5_ib_read_user_wqe() - Copy a user-space WQE to kernel space.
 *
 * @qp: QP to copy from.
 * @send: copy from the send queue when non-zero, use the receive queue
 *	  otherwise.
 * @wqe_index: index to start copying from. For send work queues, the
 *	       wqe_index is in units of MLX5_SEND_WQE_BB.
 *	       For receive work queue, it is the number of work queue
 *	       element in the queue.
 * @buffer: destination buffer.
 * @length: maximum number of bytes to copy.
 *
 * Copies at least a single WQE, but may copy more data.
 *
 * Return: the number of bytes copied, or an error code.
 */
int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
			  void *buffer, u32 length,
			  struct mlx5_ib_qp_base *base)
{
	struct ib_device *ibdev = qp->ibqp.device;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_wq *wq = send ? &qp->sq : &qp->rq;
	size_t offset;
	size_t wq_end;
	struct ib_umem *umem = base->ubuffer.umem;
	u32 first_copy_length;
	int wqe_length;
	int ret;

	if (wq->wqe_cnt == 0) {
		mlx5_ib_dbg(dev, "mlx5_ib_read_user_wqe for a QP with wqe_cnt == 0. qp_type: 0x%x\n",
			    qp->ibqp.qp_type);
		return -EINVAL;
	}

	offset = wq->offset + ((wqe_index % wq->wqe_cnt) << wq->wqe_shift);
	wq_end = wq->offset + (wq->wqe_cnt << wq->wqe_shift);

	if (send && length < sizeof(struct mlx5_wqe_ctrl_seg))
		return -EINVAL;

	if (offset > umem->length ||
	    (send && offset + sizeof(struct mlx5_wqe_ctrl_seg) > umem->length))
		return -EINVAL;

	first_copy_length = min_t(u32, offset + length, wq_end) - offset;
	ret = ib_umem_copy_from(buffer, umem, offset, first_copy_length);
	if (ret)
		return ret;

	if (send) {
		struct mlx5_wqe_ctrl_seg *ctrl = buffer;
		int ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;

		wqe_length = ds * MLX5_WQE_DS_UNITS;
	} else {
		wqe_length = 1 << wq->wqe_shift;
	}

	if (wqe_length <= first_copy_length)
		return first_copy_length;

	ret = ib_umem_copy_from(buffer + first_copy_length, umem, wq->offset,
				wqe_length - first_copy_length);
	if (ret)
		return ret;

	return wqe_length;
}
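/*
 * Dispatch a firmware asynchronous QP event to the consumer's event
 * handler, translating the mlx5 event code into the matching ib_event.
 */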
static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
{
	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
	struct ib_event event;

	if (type == MLX5_EVENT_TYPE_PATH_MIG) {
		/* This event is only valid for trans_qps */
		to_mibqp(qp)->port = to_mibqp(qp)->trans_qp.alt_port;
	}

	if (ibqp->event_handler) {
		event.device     = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case MLX5_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case MLX5_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case MLX5_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn);
			return;
		}

		ibqp->event_handler(&event, ibqp->qp_context);
	}
}
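/*
 * Size the receive queue: validate and apply the user-provided geometry
 * when a ucmd is given, otherwise derive the WQE size and count from the
 * requested capabilities and the device limits.
 */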
static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
		       int has_rq, struct mlx5_ib_qp *qp,
		       struct mlx5_ib_create_qp *ucmd)
{
	int wqe_size;
	int wq_size;

	/* Sanity check RQ size before proceeding */
	if (cap->max_recv_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)))
		return -EINVAL;

	if (!has_rq) {
		qp->rq.max_gs = 0;
		qp->rq.wqe_cnt = 0;
		qp->rq.wqe_shift = 0;
		cap->max_recv_wr = 0;
		cap->max_recv_sge = 0;
	} else {
		if (ucmd) {
			qp->rq.wqe_cnt = ucmd->rq_wqe_count;
			if (ucmd->rq_wqe_shift > BITS_PER_BYTE * sizeof(ucmd->rq_wqe_shift))
				return -EINVAL;
			qp->rq.wqe_shift = ucmd->rq_wqe_shift;
			if ((1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) < qp->wq_sig)
				return -EINVAL;
			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
			qp->rq.max_post = qp->rq.wqe_cnt;
		} else {
			wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0;
			wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
			wqe_size = roundup_pow_of_two(wqe_size);
			wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
			wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
			qp->rq.wqe_cnt = wq_size / wqe_size;
			if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq)) {
				mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
					    wqe_size,
					    MLX5_CAP_GEN(dev->mdev,
							 max_wqe_sz_rq));
				return -EINVAL;
			}
			qp->rq.wqe_shift = ilog2(wqe_size);
			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
			qp->rq.max_post = qp->rq.wqe_cnt;
		}
	}

	return 0;
}
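/*
 * Fixed per-WQE overhead of the send queue, in bytes, for each transport
 * type; this is the space consumed before any data segments are added.
 */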
static int sq_overhead(struct ib_qp_init_attr *attr)
{
	int size = 0;

	switch (attr->qp_type) {
	case IB_QPT_XRC_INI:
		size += sizeof(struct mlx5_wqe_xrc_seg);
		/* fall through */
	case IB_QPT_RC:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			max(sizeof(struct mlx5_wqe_atomic_seg) +
			    sizeof(struct mlx5_wqe_raddr_seg),
			    sizeof(struct mlx5_wqe_umr_ctrl_seg) +
			    sizeof(struct mlx5_mkey_seg));
		break;

	case IB_QPT_XRC_TGT:
		return 0;

	case IB_QPT_UC:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			max(sizeof(struct mlx5_wqe_raddr_seg),
			    sizeof(struct mlx5_wqe_umr_ctrl_seg) +
			    sizeof(struct mlx5_mkey_seg));
		break;

	case IB_QPT_UD:
		if (attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
			size += sizeof(struct mlx5_wqe_eth_pad) +
				sizeof(struct mlx5_wqe_eth_seg);
		/* fall through */
	case IB_QPT_SMI:
	case MLX5_IB_QPT_HW_GSI:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_datagram_seg);
		break;

	case MLX5_IB_QPT_REG_UMR:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_umr_ctrl_seg) +
			sizeof(struct mlx5_mkey_seg);
		break;

	default:
		return -EINVAL;
	}

	return size;
}
static int calc_send_wqe(struct ib_qp_init_attr *attr)
{
	int inl_size = 0;
	int size;

	size = sq_overhead(attr);
	if (size < 0)
		return size;

	if (attr->cap.max_inline_data) {
		inl_size = size + sizeof(struct mlx5_wqe_inline_seg) +
			attr->cap.max_inline_data;
	}

	size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);
	if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN &&
	    ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB) < MLX5_SIG_WQE_SIZE)
		return MLX5_SIG_WQE_SIZE;
	else
		return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
}
static int get_send_sge(struct ib_qp_init_attr *attr, int wqe_size)
{
	int max_sge;

	if (attr->qp_type == IB_QPT_RC)
		max_sge = (min_t(int, wqe_size, 512) -
			   sizeof(struct mlx5_wqe_ctrl_seg) -
			   sizeof(struct mlx5_wqe_raddr_seg)) /
			sizeof(struct mlx5_wqe_data_seg);
	else if (attr->qp_type == IB_QPT_XRC_INI)
		max_sge = (min_t(int, wqe_size, 512) -
			   sizeof(struct mlx5_wqe_ctrl_seg) -
			   sizeof(struct mlx5_wqe_xrc_seg) -
			   sizeof(struct mlx5_wqe_raddr_seg)) /
			sizeof(struct mlx5_wqe_data_seg);
	else
		max_sge = (wqe_size - sq_overhead(attr)) /
			sizeof(struct mlx5_wqe_data_seg);

	return min_t(int, max_sge, wqe_size - sq_overhead(attr) /
		     sizeof(struct mlx5_wqe_data_seg));
}
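/*
 * Compute the send queue buffer size and work request capacity from the
 * requested capabilities, clamping against the device limits.
 */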
static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
			struct mlx5_ib_qp *qp)
{
	int wqe_size;
	int wq_size;

	if (!attr->cap.max_send_wr)
		return 0;

	wqe_size = calc_send_wqe(attr);
	mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size);
	if (wqe_size < 0)
		return wqe_size;

	if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
		mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
			    wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
		return -EINVAL;
	}

	qp->max_inline_data = wqe_size - sq_overhead(attr) -
			      sizeof(struct mlx5_wqe_inline_seg);
	attr->cap.max_inline_data = qp->max_inline_data;

	if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN)
		qp->signature_en = true;

	wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
	qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
	if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
		mlx5_ib_dbg(dev, "send queue size (%d * %d / %d -> %d) exceeds limits(%d)\n",
			    attr->cap.max_send_wr, wqe_size, MLX5_SEND_WQE_BB,
			    qp->sq.wqe_cnt,
			    1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
		return -ENOMEM;
	}
	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
	qp->sq.max_gs = get_send_sge(attr, wqe_size);
	if (qp->sq.max_gs < attr->cap.max_send_sge)
		return -ENOMEM;

	attr->cap.max_send_sge = qp->sq.max_gs;
	qp->sq.max_post = wq_size / wqe_size;
	attr->cap.max_send_wr = qp->sq.max_post;

	return 0;
}
static int set_user_buf_size(struct mlx5_ib_dev *dev,
			     struct mlx5_ib_qp *qp,
			     struct mlx5_ib_create_qp *ucmd,
			     struct mlx5_ib_qp_base *base,
			     struct ib_qp_init_attr *attr)
{
	int desc_sz = 1 << qp->sq.wqe_shift;

	if (desc_sz > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
		mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
			     desc_sz, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
		return -EINVAL;
	}

	if (ucmd->sq_wqe_count && ((1 << ilog2(ucmd->sq_wqe_count)) != ucmd->sq_wqe_count)) {
		mlx5_ib_warn(dev, "sq_wqe_count %d is not a power of two\n",
			     ucmd->sq_wqe_count);
		return -EINVAL;
	}

	qp->sq.wqe_cnt = ucmd->sq_wqe_count;

	if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
		mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
			     qp->sq.wqe_cnt,
			     1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
		return -EINVAL;
	}

	if (attr->qp_type == IB_QPT_RAW_PACKET ||
	    qp->flags & MLX5_IB_QP_UNDERLAY) {
		base->ubuffer.buf_size = qp->rq.wqe_cnt << qp->rq.wqe_shift;
		qp->raw_packet_qp.sq.ubuffer.buf_size = qp->sq.wqe_cnt << 6;
	} else {
		base->ubuffer.buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
					 (qp->sq.wqe_cnt << 6);
	}

	return 0;
}
static int qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI ||
	    attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
	    attr->qp_type == MLX5_IB_QPT_REG_UMR ||
	    !attr->cap.max_recv_wr)
		return 0;

	return 1;
}
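/*
 * Blue flame register (bfreg) accounting: bfregs are split into a
 * low-latency class, a medium class and a high class, allocated from a
 * shared per-context array protected by bfregi->lock.
 */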
static int first_med_bfreg(void)
{
	return 1;
}

enum {
	/* this is the first blue flame register in the array of bfregs assigned
	 * to a process. Since we do not use it for blue flame but rather
	 * regular 64 bit doorbells, we do not need a lock for maintaining
	 * "odd/even" order
	 */
	NUM_NON_BLUE_FLAME_BFREGS = 1,
};

static int max_bfregs(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi)
{
	return get_num_uars(dev, bfregi) * MLX5_NON_FP_BFREGS_PER_UAR;
}

static int num_med_bfreg(struct mlx5_ib_dev *dev,
			 struct mlx5_bfreg_info *bfregi)
{
	int n;

	n = max_bfregs(dev, bfregi) - bfregi->num_low_latency_bfregs -
	    NUM_NON_BLUE_FLAME_BFREGS;

	return n >= 0 ? n : 0;
}
static int first_hi_bfreg(struct mlx5_ib_dev *dev,
			  struct mlx5_bfreg_info *bfregi)
{
	int med;

	med = num_med_bfreg(dev, bfregi);
	return ++med;
}
static int alloc_high_class_bfreg(struct mlx5_ib_dev *dev,
				  struct mlx5_bfreg_info *bfregi)
{
	int i;

	for (i = first_hi_bfreg(dev, bfregi); i < max_bfregs(dev, bfregi); i++) {
		if (!bfregi->count[i]) {
			bfregi->count[i]++;
			return i;
		}
	}

	return -ENOMEM;
}
static int alloc_med_class_bfreg(struct mlx5_ib_dev *dev,
				 struct mlx5_bfreg_info *bfregi)
{
	int minidx = first_med_bfreg();
	int i;

	for (i = first_med_bfreg(); i < first_hi_bfreg(dev, bfregi); i++) {
		if (bfregi->count[i] < bfregi->count[minidx])
			minidx = i;
		if (!bfregi->count[minidx])
			break;
	}

	bfregi->count[minidx]++;
	return minidx;
}
static int alloc_bfreg(struct mlx5_ib_dev *dev,
		       struct mlx5_bfreg_info *bfregi,
		       enum mlx5_ib_latency_class lat)
{
	int bfregn = -EINVAL;

	mutex_lock(&bfregi->lock);
	switch (lat) {
	case MLX5_IB_LATENCY_CLASS_LOW:
		BUILD_BUG_ON(NUM_NON_BLUE_FLAME_BFREGS != 1);
		bfregn = 0;
		bfregi->count[bfregn]++;
		break;

	case MLX5_IB_LATENCY_CLASS_MEDIUM:
		if (bfregi->ver < 2)
			bfregn = -ENOMEM;
		else
			bfregn = alloc_med_class_bfreg(dev, bfregi);
		break;

	case MLX5_IB_LATENCY_CLASS_HIGH:
		if (bfregi->ver < 2)
			bfregn = -ENOMEM;
		else
			bfregn = alloc_high_class_bfreg(dev, bfregi);
		break;
	}
	mutex_unlock(&bfregi->lock);

	return bfregn;
}
static void free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi, int bfregn)
{
	mutex_lock(&bfregi->lock);
	bfregi->count[bfregn]--;
	mutex_unlock(&bfregi->lock);
}
static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:	return MLX5_QP_STATE_RST;
	case IB_QPS_INIT:	return MLX5_QP_STATE_INIT;
	case IB_QPS_RTR:	return MLX5_QP_STATE_RTR;
	case IB_QPS_RTS:	return MLX5_QP_STATE_RTS;
	case IB_QPS_SQD:	return MLX5_QP_STATE_SQD;
	case IB_QPS_SQE:	return MLX5_QP_STATE_SQER;
	case IB_QPS_ERR:	return MLX5_QP_STATE_ERR;
	default:		return -1;
	}
}
static int to_mlx5_st(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_RC:			return MLX5_QP_ST_RC;
	case IB_QPT_UC:			return MLX5_QP_ST_UC;
	case IB_QPT_UD:			return MLX5_QP_ST_UD;
	case MLX5_IB_QPT_REG_UMR:	return MLX5_QP_ST_REG_UMR;
	case IB_QPT_XRC_INI:
	case IB_QPT_XRC_TGT:		return MLX5_QP_ST_XRC;
	case IB_QPT_SMI:		return MLX5_QP_ST_QP0;
	case MLX5_IB_QPT_HW_GSI:	return MLX5_QP_ST_QP1;
	case IB_QPT_RAW_IPV6:		return MLX5_QP_ST_RAW_IPV6;
	case IB_QPT_RAW_PACKET:
	case IB_QPT_RAW_ETHERTYPE:	return MLX5_QP_ST_RAW_ETHERTYPE;
	default:			return -EINVAL;
	}
}
static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq,
			     struct mlx5_ib_cq *recv_cq);
static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq,
			       struct mlx5_ib_cq *recv_cq);
static int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
			       struct mlx5_bfreg_info *bfregi, int bfregn)
{
	int bfregs_per_sys_page;
	int index_of_sys_page;
	int offset;

	bfregs_per_sys_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k) *
				MLX5_NON_FP_BFREGS_PER_UAR;
	index_of_sys_page = bfregn / bfregs_per_sys_page;

	offset = bfregn % bfregs_per_sys_page / MLX5_NON_FP_BFREGS_PER_UAR;

	return bfregi->sys_pages[index_of_sys_page] + offset;
}
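/*
 * Pin a user buffer and report its page layout: number of pages, page
 * shift, number of compound (contiguous) pages and the offset within the
 * first page.
 */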
static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev,
			    struct ib_pd *pd,
			    unsigned long addr, size_t size,
			    struct ib_umem **umem,
			    int *npages, int *page_shift, int *ncont,
			    u32 *offset)
{
	int err;

	*umem = ib_umem_get(pd->uobject->context, addr, size, 0, 0);
	if (IS_ERR(*umem)) {
		mlx5_ib_dbg(dev, "umem_get failed\n");
		return PTR_ERR(*umem);
	}

	mlx5_ib_cont_pages(*umem, addr, 0, npages, page_shift, ncont, NULL);

	err = mlx5_ib_get_buf_offset(addr, *page_shift, offset);
	if (err) {
		mlx5_ib_warn(dev, "bad offset\n");
		goto err_umem;
	}

	mlx5_ib_dbg(dev, "addr 0x%lx, size %zu, npages %d, page_shift %d, ncont %d, offset %d\n",
		    addr, size, *npages, *page_shift, *ncont, *offset);

	return 0;

err_umem:
	ib_umem_release(*umem);
	*umem = NULL;

	return err;
}
static void destroy_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			    struct mlx5_ib_rwq *rwq)
{
	struct mlx5_ib_ucontext *context;

	if (rwq->create_flags & MLX5_IB_WQ_FLAGS_DELAY_DROP)
		atomic_dec(&dev->delay_drop.rqs_cnt);

	context = to_mucontext(pd->uobject->context);
	mlx5_ib_db_unmap_user(context, &rwq->db);
	if (rwq->umem)
		ib_umem_release(rwq->umem);
}
static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			  struct mlx5_ib_rwq *rwq,
			  struct mlx5_ib_create_wq *ucmd)
{
	struct mlx5_ib_ucontext *context;
	int page_shift = 0;
	int npages;
	u32 offset = 0;
	int ncont = 0;
	int err;

	if (!ucmd->buf_addr)
		return -EINVAL;

	context = to_mucontext(pd->uobject->context);
	rwq->umem = ib_umem_get(pd->uobject->context, ucmd->buf_addr,
				rwq->buf_size, 0, 0);
	if (IS_ERR(rwq->umem)) {
		mlx5_ib_dbg(dev, "umem_get failed\n");
		err = PTR_ERR(rwq->umem);
		return err;
	}

	mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, 0, &npages, &page_shift,
			   &ncont, NULL);
	err = mlx5_ib_get_buf_offset(ucmd->buf_addr, page_shift,
				     &rwq->rq_page_offset);
	if (err) {
		mlx5_ib_warn(dev, "bad offset\n");
		goto err_umem;
	}

	rwq->rq_num_pas = ncont;
	rwq->page_shift = page_shift;
	rwq->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
	rwq->wq_sig = !!(ucmd->flags & MLX5_WQ_FLAG_SIGNATURE);

	mlx5_ib_dbg(dev, "addr 0x%llx, size %zd, npages %d, page_shift %d, ncont %d, offset %d\n",
		    (unsigned long long)ucmd->buf_addr, rwq->buf_size,
		    npages, page_shift, ncont, offset);

	err = mlx5_ib_db_map_user(context, ucmd->db_addr, &rwq->db);
	if (err) {
		mlx5_ib_dbg(dev, "map failed\n");
		goto err_umem;
	}

	rwq->create_type = MLX5_WQ_USER;
	return 0;

err_umem:
	ib_umem_release(rwq->umem);
	return err;
}
*dev
,
754 struct mlx5_bfreg_info
*bfregi
, int bfregn
)
756 return bfregn
/ MLX5_NON_FP_BFREGS_PER_UAR
* MLX5_BFREGS_PER_UAR
+
757 bfregn
% MLX5_NON_FP_BFREGS_PER_UAR
;
760 static int create_user_qp(struct mlx5_ib_dev
*dev
, struct ib_pd
*pd
,
761 struct mlx5_ib_qp
*qp
, struct ib_udata
*udata
,
762 struct ib_qp_init_attr
*attr
,
764 struct mlx5_ib_create_qp_resp
*resp
, int *inlen
,
765 struct mlx5_ib_qp_base
*base
)
767 struct mlx5_ib_ucontext
*context
;
768 struct mlx5_ib_create_qp ucmd
;
769 struct mlx5_ib_ubuffer
*ubuffer
= &base
->ubuffer
;
780 err
= ib_copy_from_udata(&ucmd
, udata
, sizeof(ucmd
));
782 mlx5_ib_dbg(dev
, "copy failed\n");
786 context
= to_mucontext(pd
->uobject
->context
);
788 * TBD: should come from the verbs when we have the API
790 if (qp
->flags
& MLX5_IB_QP_CROSS_CHANNEL
)
791 /* In CROSS_CHANNEL CQ and QP must use the same UAR */
792 bfregn
= MLX5_CROSS_CHANNEL_BFREG
;
794 bfregn
= alloc_bfreg(dev
, &context
->bfregi
, MLX5_IB_LATENCY_CLASS_HIGH
);
796 mlx5_ib_dbg(dev
, "failed to allocate low latency BFREG\n");
797 mlx5_ib_dbg(dev
, "reverting to medium latency\n");
798 bfregn
= alloc_bfreg(dev
, &context
->bfregi
, MLX5_IB_LATENCY_CLASS_MEDIUM
);
800 mlx5_ib_dbg(dev
, "failed to allocate medium latency BFREG\n");
801 mlx5_ib_dbg(dev
, "reverting to high latency\n");
802 bfregn
= alloc_bfreg(dev
, &context
->bfregi
, MLX5_IB_LATENCY_CLASS_LOW
);
804 mlx5_ib_warn(dev
, "bfreg allocation failed\n");
811 uar_index
= bfregn_to_uar_index(dev
, &context
->bfregi
, bfregn
);
812 mlx5_ib_dbg(dev
, "bfregn 0x%x, uar_index 0x%x\n", bfregn
, uar_index
);
815 qp
->sq
.wqe_shift
= ilog2(MLX5_SEND_WQE_BB
);
816 qp
->sq
.offset
= qp
->rq
.wqe_cnt
<< qp
->rq
.wqe_shift
;
818 err
= set_user_buf_size(dev
, qp
, &ucmd
, base
, attr
);
822 if (ucmd
.buf_addr
&& ubuffer
->buf_size
) {
823 ubuffer
->buf_addr
= ucmd
.buf_addr
;
824 err
= mlx5_ib_umem_get(dev
, pd
, ubuffer
->buf_addr
,
826 &ubuffer
->umem
, &npages
, &page_shift
,
831 ubuffer
->umem
= NULL
;
834 *inlen
= MLX5_ST_SZ_BYTES(create_qp_in
) +
835 MLX5_FLD_SZ_BYTES(create_qp_in
, pas
[0]) * ncont
;
836 *in
= kvzalloc(*inlen
, GFP_KERNEL
);
842 pas
= (__be64
*)MLX5_ADDR_OF(create_qp_in
, *in
, pas
);
844 mlx5_ib_populate_pas(dev
, ubuffer
->umem
, page_shift
, pas
, 0);
846 qpc
= MLX5_ADDR_OF(create_qp_in
, *in
, qpc
);
848 MLX5_SET(qpc
, qpc
, log_page_size
, page_shift
- MLX5_ADAPTER_PAGE_SHIFT
);
849 MLX5_SET(qpc
, qpc
, page_offset
, offset
);
851 MLX5_SET(qpc
, qpc
, uar_page
, uar_index
);
852 resp
->bfreg_index
= adjust_bfregn(dev
, &context
->bfregi
, bfregn
);
855 err
= mlx5_ib_db_map_user(context
, ucmd
.db_addr
, &qp
->db
);
857 mlx5_ib_dbg(dev
, "map failed\n");
861 err
= ib_copy_to_udata(udata
, resp
, sizeof(*resp
));
863 mlx5_ib_dbg(dev
, "copy failed\n");
866 qp
->create_type
= MLX5_QP_USER
;
871 mlx5_ib_db_unmap_user(context
, &qp
->db
);
878 ib_umem_release(ubuffer
->umem
);
881 free_bfreg(dev
, &context
->bfregi
, bfregn
);
static void destroy_qp_user(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			    struct mlx5_ib_qp *qp, struct mlx5_ib_qp_base *base)
{
	struct mlx5_ib_ucontext *context;

	context = to_mucontext(pd->uobject->context);
	mlx5_ib_db_unmap_user(context, &qp->db);
	if (base->ubuffer.umem)
		ib_umem_release(base->ubuffer.umem);
	free_bfreg(dev, &context->bfregi, qp->bfregn);
}
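/*
 * Create a QP backed by kernel memory, used for internal consumers such
 * as the UMR QP: allocates the work queue buffer, the doorbell record
 * and the bookkeeping arrays for posted work requests.
 */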
static int create_kernel_qp(struct mlx5_ib_dev *dev,
			    struct ib_qp_init_attr *init_attr,
			    struct mlx5_ib_qp *qp,
			    u32 **in, int *inlen,
			    struct mlx5_ib_qp_base *base)
{
	int uar_index;
	void *qpc;
	int err;

	if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN |
					IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
					IB_QP_CREATE_IPOIB_UD_LSO |
					IB_QP_CREATE_NETIF_QP |
					mlx5_ib_create_qp_sqpn_qp1()))
		return -EINVAL;

	if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
		qp->bf.bfreg = &dev->fp_bfreg;
	else
		qp->bf.bfreg = &dev->bfreg;

	/* We need to divide by two since each register is comprised of
	 * two buffers of identical size, namely odd and even
	 */
	qp->bf.buf_size = (1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size)) / 2;
	uar_index = qp->bf.bfreg->index;

	err = calc_sq_size(dev, init_attr, qp);
	if (err < 0) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		return err;
	}

	qp->rq.offset = 0;
	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
	base->ubuffer.buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);

	err = mlx5_buf_alloc(dev->mdev, base->ubuffer.buf_size, &qp->buf);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		return err;
	}

	qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
	*inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
		 MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * qp->buf.npages;
	*in = kvzalloc(*inlen, GFP_KERNEL);
	if (!*in) {
		err = -ENOMEM;
		goto err_buf;
	}

	qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);
	MLX5_SET(qpc, qpc, uar_page, uar_index);
	MLX5_SET(qpc, qpc, log_page_size, qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

	/* Set "fast registration enabled" for all kernel QPs */
	MLX5_SET(qpc, qpc, fre, 1);
	MLX5_SET(qpc, qpc, rlky, 1);

	if (init_attr->create_flags & mlx5_ib_create_qp_sqpn_qp1()) {
		MLX5_SET(qpc, qpc, deth_sqpn, 1);
		qp->flags |= MLX5_IB_QP_SQPN_QP1;
	}

	mlx5_fill_page_array(&qp->buf,
			     (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas));

	err = mlx5_db_alloc(dev->mdev, &qp->db);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto err_free;
	}

	qp->sq.wrid = kvmalloc_array(qp->sq.wqe_cnt,
				     sizeof(*qp->sq.wrid), GFP_KERNEL);
	qp->sq.wr_data = kvmalloc_array(qp->sq.wqe_cnt,
					sizeof(*qp->sq.wr_data), GFP_KERNEL);
	qp->rq.wrid = kvmalloc_array(qp->rq.wqe_cnt,
				     sizeof(*qp->rq.wrid), GFP_KERNEL);
	qp->sq.w_list = kvmalloc_array(qp->sq.wqe_cnt,
				       sizeof(*qp->sq.w_list), GFP_KERNEL);
	qp->sq.wqe_head = kvmalloc_array(qp->sq.wqe_cnt,
					 sizeof(*qp->sq.wqe_head), GFP_KERNEL);

	if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
	    !qp->sq.w_list || !qp->sq.wqe_head) {
		err = -ENOMEM;
		goto err_wrid;
	}
	qp->create_type = MLX5_QP_KERNEL;

	return 0;

err_wrid:
	kvfree(qp->sq.wqe_head);
	kvfree(qp->sq.w_list);
	kvfree(qp->sq.wrid);
	kvfree(qp->sq.wr_data);
	kvfree(qp->rq.wrid);
	mlx5_db_free(dev->mdev, &qp->db);

err_free:
	kvfree(*in);

err_buf:
	mlx5_buf_free(dev->mdev, &qp->buf);
	return err;
}
static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
	kvfree(qp->sq.wqe_head);
	kvfree(qp->sq.w_list);
	kvfree(qp->sq.wrid);
	kvfree(qp->sq.wr_data);
	kvfree(qp->rq.wrid);
	mlx5_db_free(dev->mdev, &qp->db);
	mlx5_buf_free(dev->mdev, &qp->buf);
}
static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
{
	if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
	    (attr->qp_type == IB_QPT_XRC_INI))
		return MLX5_SRQ_RQ;
	else if (!qp->has_rq)
		return MLX5_ZERO_LEN_RQ;
	else
		return MLX5_NON_ZERO_RQ;
}
static int is_connected(enum ib_qp_type qp_type)
{
	if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC)
		return 1;

	return 0;
}
static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
				    struct mlx5_ib_qp *qp,
				    struct mlx5_ib_sq *sq, u32 tdn)
{
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

	MLX5_SET(tisc, tisc, transport_domain, tdn);
	if (qp->flags & MLX5_IB_QP_UNDERLAY)
		MLX5_SET(tisc, tisc, underlay_qpn, qp->underlay_qpn);

	return mlx5_core_create_tis(dev->mdev, in, sizeof(in), &sq->tisn);
}

static void destroy_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
				      struct mlx5_ib_sq *sq)
{
	mlx5_core_destroy_tis(dev->mdev, sq->tisn);
}
static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
				   struct mlx5_ib_sq *sq, void *qpin,
				   struct ib_pd *pd)
{
	struct mlx5_ib_ubuffer *ubuffer = &sq->ubuffer;
	__be64 *pas;
	void *in;
	void *sqc;
	void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc);
	void *wq;
	int inlen;
	int err;
	int page_shift = 0;
	int npages;
	int ncont = 0;
	u32 offset = 0;

	err = mlx5_ib_umem_get(dev, pd, ubuffer->buf_addr, ubuffer->buf_size,
			       &sq->ubuffer.umem, &npages, &page_shift,
			       &ncont, &offset);
	if (err)
		return err;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) + sizeof(u64) * ncont;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_umem;
	}

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	MLX5_SET(sqc, sqc, flush_in_error_en, 1);
	if (MLX5_CAP_ETH(dev->mdev, multi_pkt_send_wqe))
		MLX5_SET(sqc, sqc, allow_multi_pkt_send_wqe, 1);
	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc, sqc, user_index, MLX5_GET(qpc, qpc, user_index));
	MLX5_SET(sqc, sqc, cqn, MLX5_GET(qpc, qpc, cqn_snd));
	MLX5_SET(sqc, sqc, tis_lst_sz, 1);
	MLX5_SET(sqc, sqc, tis_num_0, sq->tisn);
	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
	    MLX5_CAP_ETH(dev->mdev, swp))
		MLX5_SET(sqc, sqc, allow_swp, 1);

	wq = MLX5_ADDR_OF(sqc, sqc, wq);
	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd));
	MLX5_SET(wq, wq, uar_page, MLX5_GET(qpc, qpc, uar_page));
	MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr));
	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(qpc, qpc, log_sq_size));
	MLX5_SET(wq, wq, log_wq_pg_sz, page_shift - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(wq, wq, page_offset, offset);

	pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
	mlx5_ib_populate_pas(dev, sq->ubuffer.umem, page_shift, pas, 0);

	err = mlx5_core_create_sq_tracked(dev->mdev, in, inlen, &sq->base.mqp);

	kvfree(in);

	if (err)
		goto err_umem;

	return 0;

err_umem:
	ib_umem_release(sq->ubuffer.umem);
	sq->ubuffer.umem = NULL;

	return err;
}

static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
				     struct mlx5_ib_sq *sq)
{
	mlx5_core_destroy_sq_tracked(dev->mdev, &sq->base.mqp);
	ib_umem_release(sq->ubuffer.umem);
}
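/*
 * Number of bytes of PAS (physical address structure) entries needed to
 * describe the receive queue, derived from the QPC geometry fields.
 */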
static size_t get_rq_pas_size(void *qpc)
{
	u32 log_page_size = MLX5_GET(qpc, qpc, log_page_size) + 12;
	u32 log_rq_stride = MLX5_GET(qpc, qpc, log_rq_stride);
	u32 log_rq_size   = MLX5_GET(qpc, qpc, log_rq_size);
	u32 page_offset   = MLX5_GET(qpc, qpc, page_offset);
	u32 po_quanta	  = 1 << (log_page_size - 6);
	u32 rq_sz	  = 1 << (log_rq_size + 4 + log_rq_stride);
	u32 page_size	  = 1 << log_page_size;
	u32 rq_sz_po      = rq_sz + (page_offset * po_quanta);
	u32 rq_num_pas    = (rq_sz_po + page_size - 1) / page_size;

	return rq_num_pas * sizeof(u64);
}
static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
				   struct mlx5_ib_rq *rq, void *qpin,
				   size_t qpinlen)
{
	struct mlx5_ib_qp *mqp = rq->base.container_mibqp;
	__be64 *pas;
	__be64 *qp_pas;
	void *in;
	void *rqc;
	void *wq;
	void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc);
	size_t rq_pas_size = get_rq_pas_size(qpc);
	size_t inlen;
	int err;

	if (qpinlen < rq_pas_size + MLX5_BYTE_OFF(create_qp_in, pas))
		return -EINVAL;

	inlen = MLX5_ST_SZ_BYTES(create_rq_in) + rq_pas_size;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	if (!(rq->flags & MLX5_IB_RQ_CVLAN_STRIPPING))
		MLX5_SET(rqc, rqc, vsd, 1);
	MLX5_SET(rqc, rqc, mem_rq_type, MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
	MLX5_SET(rqc, rqc, flush_in_error_en, 1);
	MLX5_SET(rqc, rqc, user_index, MLX5_GET(qpc, qpc, user_index));
	MLX5_SET(rqc, rqc, cqn, MLX5_GET(qpc, qpc, cqn_rcv));

	if (mqp->flags & MLX5_IB_QP_CAP_SCATTER_FCS)
		MLX5_SET(rqc, rqc, scatter_fcs, 1);

	wq = MLX5_ADDR_OF(rqc, rqc, wq);
	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	if (rq->flags & MLX5_IB_RQ_PCI_WRITE_END_PADDING)
		MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
	MLX5_SET(wq, wq, page_offset, MLX5_GET(qpc, qpc, page_offset));
	MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd));
	MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr));
	MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(qpc, qpc, log_rq_stride) + 4);
	MLX5_SET(wq, wq, log_wq_pg_sz, MLX5_GET(qpc, qpc, log_page_size));
	MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(qpc, qpc, log_rq_size));

	pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
	qp_pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, qpin, pas);
	memcpy(pas, qp_pas, rq_pas_size);

	err = mlx5_core_create_rq_tracked(dev->mdev, in, inlen, &rq->base.mqp);

	kvfree(in);

	return err;
}

static void destroy_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
				     struct mlx5_ib_rq *rq)
{
	mlx5_core_destroy_rq_tracked(dev->mdev, &rq->base.mqp);
}
static bool tunnel_offload_supported(struct mlx5_core_dev *dev)
{
	return  (MLX5_CAP_ETH(dev, tunnel_stateless_vxlan) ||
		 MLX5_CAP_ETH(dev, tunnel_stateless_gre) ||
		 MLX5_CAP_ETH(dev, tunnel_stateless_geneve_rx));
}
static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
				    struct mlx5_ib_rq *rq, u32 tdn,
				    bool tunnel_offload_en)
{
	u32 *in;
	void *tirc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
	MLX5_SET(tirc, tirc, inline_rqn, rq->base.mqp.qpn);
	MLX5_SET(tirc, tirc, transport_domain, tdn);
	if (tunnel_offload_en)
		MLX5_SET(tirc, tirc, tunneled_offload_en, 1);

	err = mlx5_core_create_tir(dev->mdev, in, inlen, &rq->tirn);

	kvfree(in);

	return err;
}

static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
				      struct mlx5_ib_rq *rq)
{
	mlx5_core_destroy_tir(dev->mdev, rq->tirn);
}
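/*
 * A raw packet QP is emulated with separate send and receive objects: a
 * TIS + SQ pair for transmit and an RQ + TIR pair for receive.
 */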
static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
				u32 *in, size_t inlen,
				struct ib_pd *pd)
{
	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
	struct ib_uobject *uobj = pd->uobject;
	struct ib_ucontext *ucontext = uobj->context;
	struct mlx5_ib_ucontext *mucontext = to_mucontext(ucontext);
	int err;
	u32 tdn = mucontext->tdn;

	if (qp->sq.wqe_cnt) {
		err = create_raw_packet_qp_tis(dev, qp, sq, tdn);
		if (err)
			return err;

		err = create_raw_packet_qp_sq(dev, sq, in, pd);
		if (err)
			goto err_destroy_tis;

		sq->base.container_mibqp = qp;
		sq->base.mqp.event = mlx5_ib_qp_event;
	}

	if (qp->rq.wqe_cnt) {
		rq->base.container_mibqp = qp;

		if (qp->flags & MLX5_IB_QP_CVLAN_STRIPPING)
			rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING;
		if (qp->flags & MLX5_IB_QP_PCI_WRITE_END_PADDING)
			rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING;
		err = create_raw_packet_qp_rq(dev, rq, in, inlen);
		if (err)
			goto err_destroy_sq;

		err = create_raw_packet_qp_tir(dev, rq, tdn,
					       qp->tunnel_offload_en);
		if (err)
			goto err_destroy_rq;
	}

	qp->trans_qp.base.mqp.qpn = qp->sq.wqe_cnt ? sq->base.mqp.qpn :
						     rq->base.mqp.qpn;

	return 0;

err_destroy_rq:
	destroy_raw_packet_qp_rq(dev, rq);
err_destroy_sq:
	if (!qp->sq.wqe_cnt)
		return err;
	destroy_raw_packet_qp_sq(dev, sq);
err_destroy_tis:
	destroy_raw_packet_qp_tis(dev, sq);

	return err;
}
static void destroy_raw_packet_qp(struct mlx5_ib_dev *dev,
				  struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;

	if (qp->rq.wqe_cnt) {
		destroy_raw_packet_qp_tir(dev, rq);
		destroy_raw_packet_qp_rq(dev, rq);
	}

	if (qp->sq.wqe_cnt) {
		destroy_raw_packet_qp_sq(dev, sq);
		destroy_raw_packet_qp_tis(dev, sq);
	}
}
static void raw_packet_qp_copy_info(struct mlx5_ib_qp *qp,
				    struct mlx5_ib_raw_packet_qp *raw_packet_qp)
{
	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;

	sq->sq = &qp->sq;
	rq->rq = &qp->rq;
	sq->doorbell = &qp->db;
	rq->doorbell = &qp->db;
}
static void destroy_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
	mlx5_core_destroy_tir(dev->mdev, qp->rss_qp.tirn);
}
static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
				 struct ib_pd *pd,
				 struct ib_qp_init_attr *init_attr,
				 struct ib_udata *udata)
{
	struct ib_uobject *uobj = pd->uobject;
	struct ib_ucontext *ucontext = uobj->context;
	struct mlx5_ib_ucontext *mucontext = to_mucontext(ucontext);
	struct mlx5_ib_create_qp_resp resp = {};
	int inlen;
	int err;
	u32 *in;
	void *tirc;
	void *hfso;
	u32 selected_fields = 0;
	size_t min_resp_len;
	u32 tdn = mucontext->tdn;
	struct mlx5_ib_create_qp_rss ucmd = {};
	size_t required_cmd_sz;

	if (init_attr->qp_type != IB_QPT_RAW_PACKET)
		return -EOPNOTSUPP;

	if (init_attr->create_flags || init_attr->send_cq)
		return -EINVAL;

	min_resp_len = offsetof(typeof(resp), bfreg_index) + sizeof(resp.bfreg_index);
	if (udata->outlen < min_resp_len)
		return -EINVAL;

	required_cmd_sz = offsetof(typeof(ucmd), flags) + sizeof(ucmd.flags);
	if (udata->inlen < required_cmd_sz) {
		mlx5_ib_dbg(dev, "invalid inlen\n");
		return -EINVAL;
	}

	if (udata->inlen > sizeof(ucmd) &&
	    !ib_is_udata_cleared(udata, sizeof(ucmd),
				 udata->inlen - sizeof(ucmd))) {
		mlx5_ib_dbg(dev, "inlen is not supported\n");
		return -EOPNOTSUPP;
	}

	if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
		mlx5_ib_dbg(dev, "copy failed\n");
		return -EFAULT;
	}

	if (ucmd.comp_mask) {
		mlx5_ib_dbg(dev, "invalid comp mask\n");
		return -EOPNOTSUPP;
	}

	if (ucmd.flags & ~MLX5_QP_FLAG_TUNNEL_OFFLOADS) {
		mlx5_ib_dbg(dev, "invalid flags\n");
		return -EOPNOTSUPP;
	}

	if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS &&
	    !tunnel_offload_supported(dev->mdev)) {
		mlx5_ib_dbg(dev, "tunnel offloads isn't supported\n");
		return -EOPNOTSUPP;
	}

	if (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_INNER &&
	    !(ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)) {
		mlx5_ib_dbg(dev, "Tunnel offloads must be set for inner RSS\n");
		return -EOPNOTSUPP;
	}

	err = ib_copy_to_udata(udata, &resp, min_resp_len);
	if (err) {
		mlx5_ib_dbg(dev, "copy failed\n");
		return -EINVAL;
	}

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
	MLX5_SET(tirc, tirc, disp_type,
		 MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table,
		 init_attr->rwq_ind_tbl->ind_tbl_num);
	MLX5_SET(tirc, tirc, transport_domain, tdn);

	hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);

	if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)
		MLX5_SET(tirc, tirc, tunneled_offload_en, 1);

	if (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_INNER)
		hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner);
	else
		hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);

	switch (ucmd.rx_hash_function) {
	case MLX5_RX_HASH_FUNC_TOEPLITZ:
	{
		void *rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
		size_t len = MLX5_FLD_SZ_BYTES(tirc, rx_hash_toeplitz_key);

		if (len != ucmd.rx_key_len) {
			err = -EINVAL;
			goto err;
		}

		MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
		memcpy(rss_key, ucmd.rx_hash_key, len);
		break;
	}
	default:
		err = -EOPNOTSUPP;
		goto err;
	}

	if (!ucmd.rx_hash_fields_mask) {
		/* special case when this TIR serves as steering entry without hashing */
		if (!init_attr->rwq_ind_tbl->log_ind_tbl_size)
			goto create_tir;
		err = -EINVAL;
		goto err;
	}

	if (((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
	     (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4)) &&
	     ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
	     (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))) {
		err = -EINVAL;
		goto err;
	}

	/* If none of IPV4 & IPV6 SRC/DST was set - this bit field is ignored */
	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4))
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
	else if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
		 (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);

	if (((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
	     (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP)) &&
	     ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
	     (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))) {
		err = -EINVAL;
		goto err;
	}

	/* If none of TCP & UDP SRC/DST was set - this bit field is ignored */
	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP))
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
	else if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
		 (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);

	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6))
		selected_fields |= MLX5_HASH_FIELD_SEL_SRC_IP;

	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4) ||
	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
		selected_fields |= MLX5_HASH_FIELD_SEL_DST_IP;

	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP))
		selected_fields |= MLX5_HASH_FIELD_SEL_L4_SPORT;

	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP) ||
	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
		selected_fields |= MLX5_HASH_FIELD_SEL_L4_DPORT;

	MLX5_SET(rx_hash_field_select, hfso, selected_fields, selected_fields);

create_tir:
	err = mlx5_core_create_tir(dev->mdev, in, inlen, &qp->rss_qp.tirn);

	if (err)
		goto err;

	kvfree(in);
	/* qpn is reserved for that QP */
	qp->trans_qp.base.mqp.qpn = 0;
	qp->flags |= MLX5_IB_QP_RSS;
	return 0;

err:
	kvfree(in);
	return err;
}
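/*
 * Common QP creation path shared by all transports: validates creation
 * flags, sizes the work queues, builds the QPC mailbox and registers the
 * new QP on the device and CQ lists used by the reset flow.
 */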
static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata, struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_resources *devr = &dev->devr;
	int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_create_qp_resp resp = {};
	struct mlx5_ib_cq *send_cq;
	struct mlx5_ib_cq *recv_cq;
	unsigned long flags;
	u32 uidx = MLX5_IB_DEFAULT_UIDX;
	struct mlx5_ib_create_qp ucmd;
	struct mlx5_ib_qp_base *base;
	int mlx5_st;
	void *qpc;
	u32 *in;
	int err;

	mutex_init(&qp->mutex);
	spin_lock_init(&qp->sq.lock);
	spin_lock_init(&qp->rq.lock);

	mlx5_st = to_mlx5_st(init_attr->qp_type);
	if (mlx5_st < 0)
		return -EINVAL;

	if (init_attr->rwq_ind_tbl) {
		if (!udata)
			return -ENOSYS;

		err = create_rss_raw_qp_tir(dev, qp, pd, init_attr, udata);
		return err;
	}

	if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
		if (!MLX5_CAP_GEN(mdev, block_lb_mc)) {
			mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
			return -EINVAL;
		} else {
			qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
		}
	}

	if (init_attr->create_flags &
			(IB_QP_CREATE_CROSS_CHANNEL |
			 IB_QP_CREATE_MANAGED_SEND |
			 IB_QP_CREATE_MANAGED_RECV)) {
		if (!MLX5_CAP_GEN(mdev, cd)) {
			mlx5_ib_dbg(dev, "cross-channel isn't supported\n");
			return -EINVAL;
		}
		if (init_attr->create_flags & IB_QP_CREATE_CROSS_CHANNEL)
			qp->flags |= MLX5_IB_QP_CROSS_CHANNEL;
		if (init_attr->create_flags & IB_QP_CREATE_MANAGED_SEND)
			qp->flags |= MLX5_IB_QP_MANAGED_SEND;
		if (init_attr->create_flags & IB_QP_CREATE_MANAGED_RECV)
			qp->flags |= MLX5_IB_QP_MANAGED_RECV;
	}

	if (init_attr->qp_type == IB_QPT_UD &&
	    (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO))
		if (!MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
			mlx5_ib_dbg(dev, "ipoib UD lso qp isn't supported\n");
			return -EOPNOTSUPP;
		}

	if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS) {
		if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
			mlx5_ib_dbg(dev, "Scatter FCS is supported only for Raw Packet QPs");
			return -EOPNOTSUPP;
		}
		if (!MLX5_CAP_GEN(dev->mdev, eth_net_offloads) ||
		    !MLX5_CAP_ETH(dev->mdev, scatter_fcs)) {
			mlx5_ib_dbg(dev, "Scatter FCS isn't supported\n");
			return -EOPNOTSUPP;
		}
		qp->flags |= MLX5_IB_QP_CAP_SCATTER_FCS;
	}

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;

	if (init_attr->create_flags & IB_QP_CREATE_CVLAN_STRIPPING) {
		if (!(MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
		      MLX5_CAP_ETH(dev->mdev, vlan_cap)) ||
		    (init_attr->qp_type != IB_QPT_RAW_PACKET))
			return -EOPNOTSUPP;
		qp->flags |= MLX5_IB_QP_CVLAN_STRIPPING;
	}

	if (pd && pd->uobject) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
			mlx5_ib_dbg(dev, "copy failed\n");
			return -EFAULT;
		}

		err = get_qp_user_index(to_mucontext(pd->uobject->context),
					&ucmd, udata->inlen, &uidx);
		if (err)
			return err;

		qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
		qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
		if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS) {
			if (init_attr->qp_type != IB_QPT_RAW_PACKET ||
			    !tunnel_offload_supported(mdev)) {
				mlx5_ib_dbg(dev, "Tunnel offload isn't supported\n");
				return -EOPNOTSUPP;
			}
			qp->tunnel_offload_en = true;
		}

		if (init_attr->create_flags & IB_QP_CREATE_SOURCE_QPN) {
			if (init_attr->qp_type != IB_QPT_UD ||
			    (MLX5_CAP_GEN(dev->mdev, port_type) !=
			     MLX5_CAP_PORT_TYPE_IB) ||
			    !mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS)) {
				mlx5_ib_dbg(dev, "Source QP option isn't supported\n");
				return -EOPNOTSUPP;
			}

			qp->flags |= MLX5_IB_QP_UNDERLAY;
			qp->underlay_qpn = init_attr->source_qpn;
		}
	} else {
		qp->wq_sig = !!wq_signature;
	}

	base = (init_attr->qp_type == IB_QPT_RAW_PACKET ||
		qp->flags & MLX5_IB_QP_UNDERLAY) ?
	       &qp->raw_packet_qp.rq.base :
	       &qp->trans_qp.base;

	qp->has_rq = qp_has_rq(init_attr);
	err = set_rq_size(dev, &init_attr->cap, qp->has_rq,
			  qp, (pd && pd->uobject) ? &ucmd : NULL);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		return err;
	}

	if (pd) {
		if (pd->uobject) {
			__u32 max_wqes =
				1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
			mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
			if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
			    ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
				mlx5_ib_dbg(dev, "invalid rq params\n");
				return -EINVAL;
			}
			if (ucmd.sq_wqe_count > max_wqes) {
				mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
					    ucmd.sq_wqe_count, max_wqes);
				return -EINVAL;
			}
			if (init_attr->create_flags &
			    mlx5_ib_create_qp_sqpn_qp1()) {
				mlx5_ib_dbg(dev, "user-space is not allowed to create UD QPs spoofing as QP1\n");
				return -EINVAL;
			}
			err = create_user_qp(dev, pd, qp, udata, init_attr, &in,
					     &resp, &inlen, base);
			if (err)
				mlx5_ib_dbg(dev, "err %d\n", err);
		} else {
			err = create_kernel_qp(dev, init_attr, qp, &in, &inlen,
					       base);
			if (err)
				mlx5_ib_dbg(dev, "err %d\n", err);
		}

		if (err)
			return err;
	} else {
		in = kvzalloc(inlen, GFP_KERNEL);
		if (!in)
			return -ENOMEM;

		qp->create_type = MLX5_QP_EMPTY;
	}

	if (is_sqp(init_attr->qp_type))
		qp->port = init_attr->port_num;

	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);

	MLX5_SET(qpc, qpc, st, mlx5_st);
	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);

	if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
		MLX5_SET(qpc, qpc, pd, to_mpd(pd ? pd : devr->p0)->pdn);
	else
		MLX5_SET(qpc, qpc, latency_sensitive, 1);

	if (qp->wq_sig)
		MLX5_SET(qpc, qpc, wq_signature, 1);

	if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
		MLX5_SET(qpc, qpc, block_lb_mc, 1);

	if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
		MLX5_SET(qpc, qpc, cd_master, 1);
	if (qp->flags & MLX5_IB_QP_MANAGED_SEND)
		MLX5_SET(qpc, qpc, cd_slave_send, 1);
	if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
		MLX5_SET(qpc, qpc, cd_slave_receive, 1);

	if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
		int rcqe_sz;
		int scqe_sz;

		rcqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->recv_cq);
		scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq);

		if (rcqe_sz == 128)
			MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
		else
			MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA32_CQE);

		if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) {
			if (scqe_sz == 128)
				MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA64_CQE);
			else
				MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA32_CQE);
		}
	}

	if (qp->rq.wqe_cnt) {
		MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
		MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
	}

	MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr));

	if (qp->sq.wqe_cnt) {
		MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt));
	} else {
		MLX5_SET(qpc, qpc, no_sq, 1);
		if (init_attr->srq &&
		    init_attr->srq->srq_type == IB_SRQT_TM)
			MLX5_SET(qpc, qpc, offload_type,
				 MLX5_QPC_OFFLOAD_TYPE_RNDV);
	}

	/* Set default resources */
	switch (init_attr->qp_type) {
	case IB_QPT_XRC_TGT:
		MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
		MLX5_SET(qpc, qpc, cqn_snd, to_mcq(devr->c0)->mcq.cqn);
		MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
		MLX5_SET(qpc, qpc, xrcd, to_mxrcd(init_attr->xrcd)->xrcdn);
		break;
	case IB_QPT_XRC_INI:
		MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
		MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn);
		MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
		break;
	default:
		if (init_attr->srq) {
			MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x0)->xrcdn);
			MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(init_attr->srq)->msrq.srqn);
		} else {
			MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn);
			MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s1)->msrq.srqn);
		}
	}

	if (init_attr->send_cq)
		MLX5_SET(qpc, qpc, cqn_snd, to_mcq(init_attr->send_cq)->mcq.cqn);

	if (init_attr->recv_cq)
		MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(init_attr->recv_cq)->mcq.cqn);

	MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);

	/* 0xffffff means we ask to work with cqe version 0 */
	if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
		MLX5_SET(qpc, qpc, user_index, uidx);

	/* we use IB_QP_CREATE_IPOIB_UD_LSO to indicates ipoib qp */
	if (init_attr->qp_type == IB_QPT_UD &&
	    (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)) {
		MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1);
		qp->flags |= MLX5_IB_QP_LSO;
	}

	if (init_attr->create_flags & IB_QP_CREATE_PCI_WRITE_END_PADDING) {
		if (!MLX5_CAP_GEN(dev->mdev, end_pad)) {
			mlx5_ib_dbg(dev, "scatter end padding is not supported\n");
			err = -EOPNOTSUPP;
			goto err;
		} else if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
			MLX5_SET(qpc, qpc, end_padding_mode,
				 MLX5_WQ_END_PAD_MODE_ALIGN);
		} else {
			qp->flags |= MLX5_IB_QP_PCI_WRITE_END_PADDING;
		}
	}

	if (init_attr->qp_type == IB_QPT_RAW_PACKET ||
	    qp->flags & MLX5_IB_QP_UNDERLAY) {
		qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr;
		raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
		err = create_raw_packet_qp(dev, qp, in, inlen, pd);
	} else {
		err = mlx5_core_create_qp(dev->mdev, &base->mqp, in, inlen);
	}

	if (err) {
		mlx5_ib_dbg(dev, "create qp failed\n");
		goto err_create;
	}

	kvfree(in);

	base->container_mibqp = qp;
	base->mqp.event = mlx5_ib_qp_event;

	get_cqs(init_attr->qp_type, init_attr->send_cq, init_attr->recv_cq,
		&send_cq, &recv_cq);
	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
	mlx5_ib_lock_cqs(send_cq, recv_cq);
	/* Maintain device to QPs access, needed for further handling via reset
	 * flow
	 */
	list_add_tail(&qp->qps_list, &dev->qp_list);
	/* Maintain CQ to QPs access, needed for further handling via reset flow
	 */
	if (send_cq)
		list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp);
	if (recv_cq)
		list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
	mlx5_ib_unlock_cqs(send_cq, recv_cq);
	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);

	return 0;

err_create:
	if (qp->create_type == MLX5_QP_USER)
		destroy_qp_user(dev, pd, qp, base);
	else if (qp->create_type == MLX5_QP_KERNEL)
		destroy_qp_kernel(dev, qp);

err:
	kvfree(in);
	return err;
}
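/*
 * Lock the send and receive CQs in a consistent order (by CQN) to avoid
 * deadlock when both must be held; handles shared and missing CQs.
 */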
static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq) {
		if (recv_cq) {
			if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
				spin_lock(&send_cq->lock);
				spin_lock_nested(&recv_cq->lock,
						 SINGLE_DEPTH_NESTING);
			} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
				spin_lock(&send_cq->lock);
				__acquire(&recv_cq->lock);
			} else {
				spin_lock(&recv_cq->lock);
				spin_lock_nested(&send_cq->lock,
						 SINGLE_DEPTH_NESTING);
			}
		} else {
			spin_lock(&send_cq->lock);
			__acquire(&recv_cq->lock);
		}
	} else if (recv_cq) {
		spin_lock(&recv_cq->lock);
		__acquire(&send_cq->lock);
	} else {
		__acquire(&send_cq->lock);
		__acquire(&recv_cq->lock);
	}
}

static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
	__releases(&send_cq->lock) __releases(&recv_cq->lock)
{
	if (send_cq) {
		if (recv_cq) {
			if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
				spin_unlock(&recv_cq->lock);
				spin_unlock(&send_cq->lock);
			} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
				__release(&recv_cq->lock);
				spin_unlock(&send_cq->lock);
			} else {
				spin_unlock(&send_cq->lock);
				spin_unlock(&recv_cq->lock);
			}
		} else {
			__release(&recv_cq->lock);
			spin_unlock(&send_cq->lock);
		}
	} else if (recv_cq) {
		__release(&send_cq->lock);
		spin_unlock(&recv_cq->lock);
	} else {
		__release(&recv_cq->lock);
		__release(&send_cq->lock);
	}
}
static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp)
{
	return to_mpd(qp->ibqp.pd);
}
static void get_cqs(enum ib_qp_type qp_type,
		    struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
		    struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
{
	switch (qp_type) {
	case IB_QPT_XRC_TGT:
		*send_cq = NULL;
		*recv_cq = NULL;
		break;
	case MLX5_IB_QPT_REG_UMR:
	case IB_QPT_XRC_INI:
		*send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
		*recv_cq = NULL;
		break;

	case IB_QPT_SMI:
	case MLX5_IB_QPT_HW_GSI:
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	case IB_QPT_RAW_IPV6:
	case IB_QPT_RAW_ETHERTYPE:
	case IB_QPT_RAW_PACKET:
		*send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
		*recv_cq = ib_recv_cq ? to_mcq(ib_recv_cq) : NULL;
		break;

	default:
		*send_cq = NULL;
		*recv_cq = NULL;
		break;
	}
}
static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
				const struct mlx5_modify_raw_qp_param *raw_qp_param,
				u8 lag_tx_affinity);
static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_cq *send_cq, *recv_cq;
	struct mlx5_ib_qp_base *base;
	unsigned long flags;
	int err;

	if (qp->ibqp.rwq_ind_tbl) {
		destroy_rss_raw_qp_tir(dev, qp);
		return;
	}

	base = (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
		qp->flags & MLX5_IB_QP_UNDERLAY) ?
	       &qp->raw_packet_qp.rq.base :
	       &qp->trans_qp.base;

	if (qp->state != IB_QPS_RESET) {
		if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET &&
		    !(qp->flags & MLX5_IB_QP_UNDERLAY)) {
			err = mlx5_core_qp_modify(dev->mdev,
						  MLX5_CMD_OP_2RST_QP, 0,
						  NULL, &base->mqp);
		} else {
			struct mlx5_modify_raw_qp_param raw_qp_param = {
				.operation = MLX5_CMD_OP_2RST_QP
			};

			err = modify_raw_packet_qp(dev, qp, &raw_qp_param, 0);
		}
		if (err)
			mlx5_ib_warn(dev, "mlx5_ib: modify QP 0x%06x to RESET failed\n",
				     base->mqp.qpn);
	}

	get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
		&send_cq, &recv_cq);

	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
	mlx5_ib_lock_cqs(send_cq, recv_cq);
	/* del from lists under both locks above to protect reset flow paths */
	list_del(&qp->qps_list);
	if (send_cq)
		list_del(&qp->cq_send_list);

	if (recv_cq)
		list_del(&qp->cq_recv_list);

	if (qp->create_type == MLX5_QP_KERNEL) {
		__mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
				   qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (send_cq != recv_cq)
			__mlx5_ib_cq_clean(send_cq, base->mqp.qpn,
					   NULL);
	}
	mlx5_ib_unlock_cqs(send_cq, recv_cq);
	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);

	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
	    qp->flags & MLX5_IB_QP_UNDERLAY) {
		destroy_raw_packet_qp(dev, qp);
	} else {
		err = mlx5_core_destroy_qp(dev->mdev, &base->mqp);
		if (err)
			mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n",
				     base->mqp.qpn);
	}

	if (qp->create_type == MLX5_QP_KERNEL)
		destroy_qp_kernel(dev, qp);
	else if (qp->create_type == MLX5_QP_USER)
		destroy_qp_user(dev, &get_pd(qp)->ibpd, qp, base);
}
static const char *ib_qp_type_str(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_SMI:
		return "IB_QPT_SMI";
	case IB_QPT_GSI:
		return "IB_QPT_GSI";
	case IB_QPT_RC:
		return "IB_QPT_RC";
	case IB_QPT_UC:
		return "IB_QPT_UC";
	case IB_QPT_UD:
		return "IB_QPT_UD";
	case IB_QPT_RAW_IPV6:
		return "IB_QPT_RAW_IPV6";
	case IB_QPT_RAW_ETHERTYPE:
		return "IB_QPT_RAW_ETHERTYPE";
	case IB_QPT_XRC_INI:
		return "IB_QPT_XRC_INI";
	case IB_QPT_XRC_TGT:
		return "IB_QPT_XRC_TGT";
	case IB_QPT_RAW_PACKET:
		return "IB_QPT_RAW_PACKET";
	case MLX5_IB_QPT_REG_UMR:
		return "MLX5_IB_QPT_REG_UMR";
	default:
		return "Invalid QP type";
	}
}
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev;
	struct mlx5_ib_qp *qp;
	u16 xrcdn = 0;
	int err;

	if (pd) {
		dev = to_mdev(pd->device);

		if (init_attr->qp_type == IB_QPT_RAW_PACKET) {
			if (!pd->uobject) {
				mlx5_ib_dbg(dev, "Raw Packet QP is not supported for kernel consumers\n");
				return ERR_PTR(-EINVAL);
			} else if (!to_mucontext(pd->uobject->context)->cqe_version) {
				mlx5_ib_dbg(dev, "Raw Packet QP is only supported for CQE version > 0\n");
				return ERR_PTR(-EINVAL);
			}
		}
	} else {
		/* being cautious here */
		if (init_attr->qp_type != IB_QPT_XRC_TGT &&
		    init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
			pr_warn("%s: no PD for transport %s\n", __func__,
				ib_qp_type_str(init_attr->qp_type));
			return ERR_PTR(-EINVAL);
		}
		dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
	}

	switch (init_attr->qp_type) {
	case IB_QPT_XRC_TGT:
	case IB_QPT_XRC_INI:
		if (!MLX5_CAP_GEN(dev->mdev, xrc)) {
			mlx5_ib_dbg(dev, "XRC not supported\n");
			return ERR_PTR(-ENOSYS);
		}
		init_attr->recv_cq = NULL;
		if (init_attr->qp_type == IB_QPT_XRC_TGT) {
			xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
			init_attr->send_cq = NULL;
		}

		/* fall through */
	case IB_QPT_RAW_PACKET:
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	case IB_QPT_SMI:
	case MLX5_IB_QPT_HW_GSI:
	case MLX5_IB_QPT_REG_UMR:
		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);

		err = create_qp_common(dev, pd, init_attr, udata, qp);
		if (err) {
			mlx5_ib_dbg(dev, "create_qp_common failed\n");
			kfree(qp);
			return ERR_PTR(err);
		}

		if (is_qp0(init_attr->qp_type))
			qp->ibqp.qp_num = 0;
		else if (is_qp1(init_attr->qp_type))
			qp->ibqp.qp_num = 1;
		else
			qp->ibqp.qp_num = qp->trans_qp.base.mqp.qpn;

		mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
			    qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn,
			    init_attr->recv_cq ? to_mcq(init_attr->recv_cq)->mcq.cqn : -1,
			    init_attr->send_cq ? to_mcq(init_attr->send_cq)->mcq.cqn : -1);

		qp->trans_qp.xrcdn = xrcdn;

		break;

	case IB_QPT_GSI:
		return mlx5_ib_gsi_create_qp(pd, init_attr);

	case IB_QPT_RAW_IPV6:
	case IB_QPT_RAW_ETHERTYPE:
	default:
		mlx5_ib_dbg(dev, "unsupported qp type %d\n",
			    init_attr->qp_type);
		/* Don't support raw QPs */
		return ERR_PTR(-EINVAL);
	}

	return &qp->ibqp;
}
int mlx5_ib_destroy_qp(struct ib_qp *qp)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_ib_qp *mqp = to_mqp(qp);

	if (unlikely(qp->qp_type == IB_QPT_GSI))
		return mlx5_ib_gsi_destroy_qp(qp);

	destroy_qp_common(dev, mqp);

	kfree(mqp);

	return 0;
}

static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr,
				   int attr_mask)
{
	u32 hw_access_flags = 0;
	u8 dest_rd_atomic;
	u32 access_flags;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->trans_qp.resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->trans_qp.atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MLX5_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= (MLX5_QP_BIT_RAE | MLX5_ATOMIC_MODE_CX);
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MLX5_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}

enum {
	MLX5_PATH_FLAG_FL	= 1 << 0,
	MLX5_PATH_FLAG_FREE_AR	= 1 << 1,
	MLX5_PATH_FLAG_COUNTER	= 1 << 2,
};

static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
{
	if (rate == IB_RATE_PORT_CURRENT)
		return 0;

	if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS)
		return -EINVAL;

	while (rate != IB_RATE_PORT_CURRENT &&
	       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
		 MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
		--rate;

	return rate ? rate + MLX5_STAT_RATE_OFFSET : rate;
}

static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
				      struct mlx5_ib_sq *sq, u8 sl)
{
	void *in;
	void *tisc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_tis_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_tis_in, in, bitmask.prio, 1);

	tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
	MLX5_SET(tisc, tisc, prio, ((sl & 0x7) << 1));

	err = mlx5_core_modify_tis(dev, sq->tisn, in, inlen);

	kvfree(in);

	return err;
}

static int modify_raw_packet_tx_affinity(struct mlx5_core_dev *dev,
					 struct mlx5_ib_sq *sq, u8 tx_affinity)
{
	void *in;
	void *tisc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_tis_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_tis_in, in, bitmask.lag_tx_port_affinity, 1);

	tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
	MLX5_SET(tisc, tisc, lag_tx_port_affinity, tx_affinity);

	err = mlx5_core_modify_tis(dev, sq->tisn, in, inlen);

	kvfree(in);

	return err;
}

static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
			 const struct rdma_ah_attr *ah,
			 struct mlx5_qp_path *path, u8 port, int attr_mask,
			 u32 path_flags, const struct ib_qp_attr *attr,
			 bool alt)
{
	const struct ib_global_route *grh = rdma_ah_read_grh(ah);
	int err;
	enum ib_gid_type gid_type;
	u8 ah_flags = rdma_ah_get_ah_flags(ah);
	u8 sl = rdma_ah_get_sl(ah);

	if (attr_mask & IB_QP_PKEY_INDEX)
		path->pkey_index = cpu_to_be16(alt ? attr->alt_pkey_index :
						     attr->pkey_index);

	if (ah_flags & IB_AH_GRH) {
		if (grh->sgid_index >=
		    dev->mdev->port_caps[port - 1].gid_table_len) {
			pr_err("sgid_index (%u) too large. max is %d\n",
			       grh->sgid_index,
			       dev->mdev->port_caps[port - 1].gid_table_len);
			return -EINVAL;
		}
	}

	if (ah->type == RDMA_AH_ATTR_TYPE_ROCE) {
		if (!(ah_flags & IB_AH_GRH))
			return -EINVAL;
		err = mlx5_get_roce_gid_type(dev, port, grh->sgid_index,
					     &gid_type);
		if (err)
			return err;
		memcpy(path->rmac, ah->roce.dmac, sizeof(ah->roce.dmac));
		if (qp->ibqp.qp_type == IB_QPT_RC ||
		    qp->ibqp.qp_type == IB_QPT_UC ||
		    qp->ibqp.qp_type == IB_QPT_XRC_INI ||
		    qp->ibqp.qp_type == IB_QPT_XRC_TGT)
			path->udp_sport = mlx5_get_roce_udp_sport(dev, port,
								  grh->sgid_index);
		path->dci_cfi_prio_sl = (sl & 0x7) << 4;
		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
			path->ecn_dscp = (grh->traffic_class >> 2) & 0x3f;
	} else {
		path->fl_free_ar = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
		path->fl_free_ar |=
			(path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x40 : 0;
		path->rlid = cpu_to_be16(rdma_ah_get_dlid(ah));
		path->grh_mlid = rdma_ah_get_path_bits(ah) & 0x7f;
		if (ah_flags & IB_AH_GRH)
			path->grh_mlid |= 1 << 7;
		path->dci_cfi_prio_sl = sl & 0xf;
	}

	if (ah_flags & IB_AH_GRH) {
		path->mgid_index = grh->sgid_index;
		path->hop_limit = grh->hop_limit;
		path->tclass_flowlabel =
			cpu_to_be32((grh->traffic_class << 20) |
				    (grh->flow_label));
		memcpy(path->rgid, grh->dgid.raw, 16);
	}

	err = ib_rate_to_mlx5(dev, rdma_ah_get_static_rate(ah));
	if (err < 0)
		return err;
	path->static_rate = err;
	path->port = port;

	if (attr_mask & IB_QP_TIMEOUT)
		path->ackto_lt = (alt ? attr->alt_timeout : attr->timeout) << 3;

	if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt)
		return modify_raw_packet_eth_prio(dev->mdev,
						  &qp->raw_packet_qp.sq,
						  sl & 0xf);

	return 0;
}

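/*
 * opt_mask[cur_state][new_state][service_type] lists the optional
 * parameters the firmware accepts for a given state transition. The
 * mask derived from the caller's attr_mask is ANDed with this table in
 * __mlx5_ib_modify_qp() before the command is posted.
 */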
static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = {
	[MLX5_QP_STATE_INIT] = {
		[MLX5_QP_STATE_INIT] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_PRI_PORT,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_PRI_PORT,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_Q_KEY		|
					  MLX5_QP_OPTPAR_PRI_PORT,
		},
		[MLX5_QP_STATE_RTR] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX	|
					   MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					   MLX5_QP_OPTPAR_RRE		|
					   MLX5_QP_OPTPAR_RAE		|
					   MLX5_QP_OPTPAR_RWE		|
					   MLX5_QP_OPTPAR_PKEY_INDEX,
		},
	},
	[MLX5_QP_STATE_RTR] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_RNR_TIMEOUT,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
		},
	},
	[MLX5_QP_STATE_RTS] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_RNR_TIMEOUT	|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY		|
					  MLX5_QP_OPTPAR_SRQN		|
					  MLX5_QP_OPTPAR_CQN_RCV,
		},
	},
	[MLX5_QP_STATE_SQER] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_UD]	 = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_UC]	 = MLX5_QP_OPTPAR_RWE,
			[MLX5_QP_ST_RC]	 = MLX5_QP_OPTPAR_RNR_TIMEOUT	|
					   MLX5_QP_OPTPAR_RWE		|
					   MLX5_QP_OPTPAR_RAE		|
					   MLX5_QP_OPTPAR_RRE,
		},
	},
};

static int ib_nr_to_mlx5_nr(int ib_mask)
{
	switch (ib_mask) {
	case IB_QP_STATE:
		return 0;
	case IB_QP_CUR_STATE:
		return 0;
	case IB_QP_EN_SQD_ASYNC_NOTIFY:
		return 0;
	case IB_QP_ACCESS_FLAGS:
		return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE |
			MLX5_QP_OPTPAR_RAE;
	case IB_QP_PKEY_INDEX:
		return MLX5_QP_OPTPAR_PKEY_INDEX;
	case IB_QP_PORT:
		return MLX5_QP_OPTPAR_PRI_PORT;
	case IB_QP_QKEY:
		return MLX5_QP_OPTPAR_Q_KEY;
	case IB_QP_AV:
		return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH |
			MLX5_QP_OPTPAR_PRI_PORT;
	case IB_QP_PATH_MTU:
		return 0;
	case IB_QP_TIMEOUT:
		return MLX5_QP_OPTPAR_ACK_TIMEOUT;
	case IB_QP_RETRY_CNT:
		return MLX5_QP_OPTPAR_RETRY_COUNT;
	case IB_QP_RNR_RETRY:
		return MLX5_QP_OPTPAR_RNR_RETRY;
	case IB_QP_RQ_PSN:
		return 0;
	case IB_QP_MAX_QP_RD_ATOMIC:
		return MLX5_QP_OPTPAR_SRA_MAX;
	case IB_QP_ALT_PATH:
		return MLX5_QP_OPTPAR_ALT_ADDR_PATH;
	case IB_QP_MIN_RNR_TIMER:
		return MLX5_QP_OPTPAR_RNR_TIMEOUT;
	case IB_QP_SQ_PSN:
		return 0;
	case IB_QP_MAX_DEST_RD_ATOMIC:
		return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE |
			MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE;
	case IB_QP_PATH_MIG_STATE:
		return MLX5_QP_OPTPAR_PM_STATE;
	case IB_QP_CAP:
		return 0;
	case IB_QP_DEST_QPN:
		return 0;
	}
	return 0;
}

static int ib_mask_to_mlx5_opt(int ib_mask)
{
	int result = 0;
	int i;

	for (i = 0; i < 8 * sizeof(int); i++) {
		if ((1 << i) & ib_mask)
			result |= ib_nr_to_mlx5_nr(1 << i);
	}

	return result;
}

static int modify_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
				   struct mlx5_ib_rq *rq, int new_state,
				   const struct mlx5_modify_raw_qp_param *raw_qp_param)
{
	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_rq_in, in, rq_state, rq->state);

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
	MLX5_SET(rqc, rqc, state, new_state);

	if (raw_qp_param->set_mask & MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID) {
		if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) {
			MLX5_SET64(modify_rq_in, in, modify_bitmask,
				   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
			MLX5_SET(rqc, rqc, counter_set_id, raw_qp_param->rq_q_ctr_id);
		} else
			pr_info_once("%s: RAW PACKET QP counters are not supported on current FW\n",
				     dev->ib_dev.name);
	}

	err = mlx5_core_modify_rq(dev->mdev, rq->base.mqp.qpn, in, inlen);
	if (err)
		goto out;

	rq->state = new_state;

out:
	kvfree(in);
	return err;
}

static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev,
				   struct mlx5_ib_sq *sq,
				   int new_state,
				   const struct mlx5_modify_raw_qp_param *raw_qp_param)
{
	struct mlx5_ib_qp *ibqp = sq->base.container_mibqp;
	u32 old_rate = ibqp->rate_limit;
	u32 new_rate = old_rate;
	u16 rl_index = 0;
	void *in;
	void *sqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_sq_in, in, sq_state, sq->state);

	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
	MLX5_SET(sqc, sqc, state, new_state);

	if (raw_qp_param->set_mask & MLX5_RAW_QP_RATE_LIMIT) {
		if (new_state != MLX5_SQC_STATE_RDY)
			pr_warn("%s: Rate limit can only be changed when SQ is moving to RDY\n",
				__func__);
		else
			new_rate = raw_qp_param->rate_limit;
	}

	if (old_rate != new_rate) {
		if (new_rate) {
			err = mlx5_rl_add_rate(dev, new_rate, &rl_index);
			if (err) {
				pr_err("Failed configuring rate %u: %d\n",
				       new_rate, err);
				goto out;
			}
		}

		MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
		MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);
	}

	err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in, inlen);
	if (err) {
		/* Remove new rate from table if failed */
		if (new_rate &&
		    old_rate != new_rate)
			mlx5_rl_remove_rate(dev, new_rate);
		goto out;
	}

	/* Only remove the old rate after new rate was set */
	if ((old_rate &&
	    (old_rate != new_rate)) ||
	    (new_state != MLX5_SQC_STATE_RDY))
		mlx5_rl_remove_rate(dev, old_rate);

	ibqp->rate_limit = new_rate;
	sq->state = new_state;

out:
	kvfree(in);
	return err;
}

static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
				const struct mlx5_modify_raw_qp_param *raw_qp_param,
				u8 tx_affinity)
{
	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
	int modify_rq = !!qp->rq.wqe_cnt;
	int modify_sq = !!qp->sq.wqe_cnt;
	int rq_state;
	int sq_state;
	int err;

	switch (raw_qp_param->operation) {
	case MLX5_CMD_OP_RST2INIT_QP:
		rq_state = MLX5_RQC_STATE_RDY;
		sq_state = MLX5_SQC_STATE_RDY;
		break;
	case MLX5_CMD_OP_2ERR_QP:
		rq_state = MLX5_RQC_STATE_ERR;
		sq_state = MLX5_SQC_STATE_ERR;
		break;
	case MLX5_CMD_OP_2RST_QP:
		rq_state = MLX5_RQC_STATE_RST;
		sq_state = MLX5_SQC_STATE_RST;
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
		if (raw_qp_param->set_mask ==
		    MLX5_RAW_QP_RATE_LIMIT) {
			modify_rq = 0;
			sq_state = sq->state;
		} else {
			return raw_qp_param->set_mask ? -EINVAL : 0;
		}
		break;
	case MLX5_CMD_OP_INIT2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
		if (raw_qp_param->set_mask)
			return -EINVAL;
		else
			return 0;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	if (modify_rq) {
		err = modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param);
		if (err)
			return err;
	}

	if (modify_sq) {
		if (tx_affinity) {
			err = modify_raw_packet_tx_affinity(dev->mdev, sq,
							    tx_affinity);
			if (err)
				return err;
		}

		return modify_raw_packet_qp_sq(dev->mdev, sq, sq_state, raw_qp_param);
	}

	return 0;
}

static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
			       const struct ib_qp_attr *attr, int attr_mask,
			       enum ib_qp_state cur_state, enum ib_qp_state new_state)
{
	static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = {
		[MLX5_QP_STATE_RST] = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_INIT]	= MLX5_CMD_OP_RST2INIT_QP,
		},
		[MLX5_QP_STATE_INIT]  = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_INIT]	= MLX5_CMD_OP_INIT2INIT_QP,
			[MLX5_QP_STATE_RTR]	= MLX5_CMD_OP_INIT2RTR_QP,
		},
		[MLX5_QP_STATE_RTR]   = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_RTS]	= MLX5_CMD_OP_RTR2RTS_QP,
		},
		[MLX5_QP_STATE_RTS]   = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_RTS]	= MLX5_CMD_OP_RTS2RTS_QP,
		},
		[MLX5_QP_STATE_SQD] = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
		},
		[MLX5_QP_STATE_SQER] = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_RTS]	= MLX5_CMD_OP_SQERR2RTS_QP,
		},
		[MLX5_QP_STATE_ERR] = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
		},
	};

	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
	struct mlx5_ib_cq *send_cq, *recv_cq;
	struct mlx5_qp_context *context;
	struct mlx5_ib_pd *pd;
	struct mlx5_ib_port *mibport = NULL;
	enum mlx5_qp_state mlx5_cur, mlx5_new;
	enum mlx5_qp_optpar optpar;
	int mlx5_st;
	int err;
	u16 op;
	u8 tx_affinity = 0;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	err = to_mlx5_st(ibqp->qp_type);
	if (err < 0) {
		mlx5_ib_dbg(dev, "unsupported qp type %d\n", ibqp->qp_type);
		goto out;
	}

	context->flags = cpu_to_be32(err << 16);

	if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
		context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
	} else {
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			context->flags |= cpu_to_be32(MLX5_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			context->flags |= cpu_to_be32(MLX5_QP_PM_ARMED << 11);
			break;
		}
	}

	if ((cur_state == IB_QPS_RESET) && (new_state == IB_QPS_INIT)) {
		if ((ibqp->qp_type == IB_QPT_RC) ||
		    (ibqp->qp_type == IB_QPT_UD &&
		     !(qp->flags & MLX5_IB_QP_SQPN_QP1)) ||
		    (ibqp->qp_type == IB_QPT_UC) ||
		    (ibqp->qp_type == IB_QPT_RAW_PACKET) ||
		    (ibqp->qp_type == IB_QPT_XRC_INI) ||
		    (ibqp->qp_type == IB_QPT_XRC_TGT)) {
			if (mlx5_lag_is_active(dev->mdev)) {
				tx_affinity = (unsigned int)atomic_add_return(1,
						&dev->roce.next_port) %
						MLX5_MAX_PORTS + 1;
				context->flags |= cpu_to_be32(tx_affinity << 24);
			}
		}
	}

	if (is_sqp(ibqp->qp_type)) {
		context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
	} else if ((ibqp->qp_type == IB_QPT_UD &&
		    !(qp->flags & MLX5_IB_QP_UNDERLAY)) ||
		   ibqp->qp_type == MLX5_IB_QPT_REG_UMR) {
		context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
	} else if (attr_mask & IB_QP_PATH_MTU) {
		if (attr->path_mtu < IB_MTU_256 ||
		    attr->path_mtu > IB_MTU_4096) {
			mlx5_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu);
			err = -EINVAL;
			goto out;
		}
		context->mtu_msgmax = (attr->path_mtu << 5) |
				      (u8)MLX5_CAP_GEN(dev->mdev, log_max_msg);
	}

	if (attr_mask & IB_QP_DEST_QPN)
		context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);

	if (attr_mask & IB_QP_PKEY_INDEX)
		context->pri_path.pkey_index = cpu_to_be16(attr->pkey_index);

	/* todo implement counter_index functionality */

	if (is_sqp(ibqp->qp_type))
		context->pri_path.port = qp->port;

	if (attr_mask & IB_QP_PORT)
		context->pri_path.port = attr->port_num;

	if (attr_mask & IB_QP_AV) {
		err = mlx5_set_path(dev, qp, &attr->ah_attr, &context->pri_path,
				    attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
				    attr_mask, 0, attr, false);
		if (err)
			goto out;
	}

	if (attr_mask & IB_QP_TIMEOUT)
		context->pri_path.ackto_lt |= attr->timeout << 3;

	if (attr_mask & IB_QP_ALT_PATH) {
		err = mlx5_set_path(dev, qp, &attr->alt_ah_attr,
				    &context->alt_path,
				    attr->alt_port_num,
				    attr_mask | IB_QP_PKEY_INDEX | IB_QP_TIMEOUT,
				    0, attr, true);
		if (err)
			goto out;
	}

	pd = get_pd(qp);
	get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
		&send_cq, &recv_cq);

	context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
	context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0;
	context->cqn_recv = recv_cq ? cpu_to_be32(recv_cq->mcq.cqn) : 0;
	context->params1  = cpu_to_be32(MLX5_IB_ACK_REQ_FREQ << 28);

	if (attr_mask & IB_QP_RNR_RETRY)
		context->params1 |= cpu_to_be32(attr->rnr_retry << 13);

	if (attr_mask & IB_QP_RETRY_CNT)
		context->params1 |= cpu_to_be32(attr->retry_cnt << 16);

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic)
			context->params1 |=
				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
	}

	if (attr_mask & IB_QP_SQ_PSN)
		context->next_send_psn = cpu_to_be32(attr->sq_psn);

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic)
			context->params2 |=
				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
		context->params2 |= to_mlx5_access_flags(qp, attr, attr_mask);

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);

	if (attr_mask & IB_QP_RQ_PSN)
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	if (attr_mask & IB_QP_QKEY)
		context->qkey = cpu_to_be32(attr->qkey);

	if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->db_rec_addr = cpu_to_be64(qp->db.dma);

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		u8 port_num = (attr_mask & IB_QP_PORT ? attr->port_num :
			       qp->port) - 1;

		/* Underlay port should be used - index 0 function per port */
		if (qp->flags & MLX5_IB_QP_UNDERLAY)
			port_num = 0;

		mibport = &dev->port[port_num];
		context->qp_counter_set_usr_page |=
			cpu_to_be32((u32)(mibport->cnts.set_id) << 24);
	}

	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->sq_crq_size |= cpu_to_be16(1 << 4);

	if (qp->flags & MLX5_IB_QP_SQPN_QP1)
		context->deth_sqpn = cpu_to_be32(1);

	mlx5_cur = to_mlx5_state(cur_state);
	mlx5_new = to_mlx5_state(new_state);
	mlx5_st = to_mlx5_st(ibqp->qp_type);
	if (mlx5_st < 0)
		goto out;

	if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE ||
	    !optab[mlx5_cur][mlx5_new]) {
		err = -EINVAL;
		goto out;
	}

	op = optab[mlx5_cur][mlx5_new];
	optpar = ib_mask_to_mlx5_opt(attr_mask);
	optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];

	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
	    qp->flags & MLX5_IB_QP_UNDERLAY) {
		struct mlx5_modify_raw_qp_param raw_qp_param = {};

		raw_qp_param.operation = op;
		if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
			raw_qp_param.rq_q_ctr_id = mibport->cnts.set_id;
			raw_qp_param.set_mask |= MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID;
		}

		if (attr_mask & IB_QP_RATE_LIMIT) {
			raw_qp_param.rate_limit = attr->rate_limit;
			raw_qp_param.set_mask |= MLX5_RAW_QP_RATE_LIMIT;
		}

		err = modify_raw_packet_qp(dev, qp, &raw_qp_param, tx_affinity);
	} else {
		err = mlx5_core_qp_modify(dev->mdev, op, optpar, context,
					  &base->mqp);
	}

	if (err)
		goto out;

	qp->state = new_state;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->trans_qp.atomic_rd_en = attr->qp_access_flags;
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->trans_qp.resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT)
		qp->port = attr->port_num;
	if (attr_mask & IB_QP_ALT_PATH)
		qp->trans_qp.alt_port = attr->alt_port_num;

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (new_state == IB_QPS_RESET &&
	    !ibqp->uobject && ibqp->qp_type != IB_QPT_XRC_TGT) {
		mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
				 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
		if (send_cq != recv_cq)
			mlx5_ib_cq_clean(send_cq, base->mqp.qpn, NULL);

		qp->rq.head = 0;
		qp->rq.tail = 0;
		qp->sq.head = 0;
		qp->sq.tail = 0;
		qp->sq.cur_post = 0;
		qp->sq.last_poll = 0;
		qp->db.db[MLX5_RCV_DBR] = 0;
		qp->db.db[MLX5_SND_DBR] = 0;
	}

out:
	kfree(context);
	return err;
}

int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	enum ib_qp_type qp_type;
	enum ib_qp_state cur_state, new_state;
	int err = -EINVAL;
	int port;
	enum rdma_link_layer ll = IB_LINK_LAYER_UNSPECIFIED;

	if (ibqp->rwq_ind_tbl)
		return -ENOSYS;

	if (unlikely(ibqp->qp_type == IB_QPT_GSI))
		return mlx5_ib_gsi_modify_qp(ibqp, attr, attr_mask);

	qp_type = (unlikely(ibqp->qp_type == MLX5_IB_QPT_HW_GSI)) ?
		IB_QPT_GSI : ibqp->qp_type;

	mutex_lock(&qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!(cur_state == new_state && cur_state == IB_QPS_RESET)) {
		port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
		ll = dev->ib_dev.get_link_layer(&dev->ib_dev, port);
	}

	if (qp->flags & MLX5_IB_QP_UNDERLAY) {
		if (attr_mask & ~(IB_QP_STATE | IB_QP_CUR_STATE)) {
			mlx5_ib_dbg(dev, "invalid attr_mask 0x%x when underlay QP is used\n",
				    attr_mask);
			goto out;
		}
	} else if (qp_type != MLX5_IB_QPT_REG_UMR &&
	    !ib_modify_qp_is_ok(cur_state, new_state, qp_type, attr_mask, ll)) {
		mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
			    cur_state, new_state, ibqp->qp_type, attr_mask);
		goto out;
	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 ||
	     attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports))) {
		mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n",
			    attr->port_num, dev->num_ports);
		goto out;
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
		if (attr->pkey_index >=
		    dev->mdev->port_caps[port - 1].pkey_table_len) {
			mlx5_ib_dbg(dev, "invalid pkey index %d\n",
				    attr->pkey_index);
			goto out;
		}
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic >
	    (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp))) {
		mlx5_ib_dbg(dev, "invalid max_rd_atomic value %d\n",
			    attr->max_rd_atomic);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic >
	    (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp))) {
		mlx5_ib_dbg(dev, "invalid max_dest_rd_atomic value %d\n",
			    attr->max_dest_rd_atomic);
		goto out;
	}

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		err = 0;
		goto out;
	}

	err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);

out:
	mutex_unlock(&qp->mutex);
	return err;
}

static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
{
	struct mlx5_ib_cq *cq;
	unsigned cur;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max_post))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max_post;
}

static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->raddr    = cpu_to_be64(remote_addr);
	rseg->rkey     = cpu_to_be32(rkey);
	rseg->reserved = 0;
}

static void *set_eth_seg(struct mlx5_wqe_eth_seg *eseg,
			 struct ib_send_wr *wr, void *qend,
			 struct mlx5_ib_qp *qp, int *size)
{
	void *seg = eseg;

	memset(eseg, 0, sizeof(struct mlx5_wqe_eth_seg));

	if (wr->send_flags & IB_SEND_IP_CSUM)
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM |
				 MLX5_ETH_WQE_L4_CSUM;

	seg += sizeof(struct mlx5_wqe_eth_seg);
	*size += sizeof(struct mlx5_wqe_eth_seg) / 16;

	if (wr->opcode == IB_WR_LSO) {
		struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr);
		int size_of_inl_hdr_start = sizeof(eseg->inline_hdr.start);
		u64 left, leftlen, copysz;
		void *pdata = ud_wr->header;

		left = ud_wr->hlen;
		eseg->mss = cpu_to_be16(ud_wr->mss);
		eseg->inline_hdr.sz = cpu_to_be16(left);

		/*
		 * check if there is space till the end of queue, if yes,
		 * copy all in one shot, otherwise copy till the end of
		 * queue, roll back and then copy the rest
		 */
		leftlen = qend - (void *)eseg->inline_hdr.start;
		copysz = min_t(u64, leftlen, left);

		memcpy(seg - size_of_inl_hdr_start, pdata, copysz);

		if (likely(copysz > size_of_inl_hdr_start)) {
			seg += ALIGN(copysz - size_of_inl_hdr_start, 16);
			*size += ALIGN(copysz - size_of_inl_hdr_start, 16) / 16;
		}

		if (unlikely(copysz < left)) { /* the last wqe in the queue */
			seg = mlx5_get_send_wqe(qp, 0);
			left -= copysz;
			pdata += copysz;
			memcpy(seg, pdata, left);
			seg += ALIGN(left, 16);
			*size += ALIGN(left, 16) / 16;
		}
	}

	return seg;
}

static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
			     struct ib_send_wr *wr)
{
	memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
	dseg->av.dqp_dct = cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
	dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey);
}

static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->byte_count = cpu_to_be32(sg->length);
	dseg->lkey       = cpu_to_be32(sg->lkey);
	dseg->addr       = cpu_to_be64(sg->addr);
}

static u64 get_xlt_octo(u64 bytes)
{
	return ALIGN(bytes, MLX5_IB_UMR_XLT_ALIGNMENT) /
	       MLX5_IB_UMR_OCTOWORD;
}

static __be64 frwr_mkey_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_LEN		|
		MLX5_MKEY_MASK_PAGE_SIZE	|
		MLX5_MKEY_MASK_START_ADDR	|
		MLX5_MKEY_MASK_EN_RINVAL	|
		MLX5_MKEY_MASK_KEY		|
		MLX5_MKEY_MASK_LR		|
		MLX5_MKEY_MASK_LW		|
		MLX5_MKEY_MASK_RR		|
		MLX5_MKEY_MASK_RW		|
		MLX5_MKEY_MASK_A		|
		MLX5_MKEY_MASK_SMALL_FENCE	|
		MLX5_MKEY_MASK_FREE;

	return cpu_to_be64(result);
}

static __be64 sig_mkey_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_LEN		|
		MLX5_MKEY_MASK_PAGE_SIZE	|
		MLX5_MKEY_MASK_START_ADDR	|
		MLX5_MKEY_MASK_EN_SIGERR	|
		MLX5_MKEY_MASK_EN_RINVAL	|
		MLX5_MKEY_MASK_KEY		|
		MLX5_MKEY_MASK_LR		|
		MLX5_MKEY_MASK_LW		|
		MLX5_MKEY_MASK_RR		|
		MLX5_MKEY_MASK_RW		|
		MLX5_MKEY_MASK_SMALL_FENCE	|
		MLX5_MKEY_MASK_FREE		|
		MLX5_MKEY_MASK_BSF_EN;

	return cpu_to_be64(result);
}

static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
			    struct mlx5_ib_mr *mr)
{
	int size = mr->ndescs * mr->desc_size;

	memset(umr, 0, sizeof(*umr));

	umr->flags = MLX5_UMR_CHECK_NOT_FREE;
	umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
	umr->mkey_mask = frwr_mkey_mask();
}

static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
{
	memset(umr, 0, sizeof(*umr));
	umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
	umr->flags = MLX5_UMR_INLINE;
}

static __be64 get_umr_enable_mr_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_KEY |
		 MLX5_MKEY_MASK_FREE;

	return cpu_to_be64(result);
}

static __be64 get_umr_disable_mr_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_FREE;

	return cpu_to_be64(result);
}

static __be64 get_umr_update_translation_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_LEN |
		 MLX5_MKEY_MASK_PAGE_SIZE |
		 MLX5_MKEY_MASK_START_ADDR;

	return cpu_to_be64(result);
}

static __be64 get_umr_update_access_mask(int atomic)
{
	u64 result;

	result = MLX5_MKEY_MASK_LR |
		 MLX5_MKEY_MASK_LW |
		 MLX5_MKEY_MASK_RR |
		 MLX5_MKEY_MASK_RW;

	if (atomic)
		result |= MLX5_MKEY_MASK_A;

	return cpu_to_be64(result);
}

static __be64 get_umr_update_pd_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_PD;

	return cpu_to_be64(result);
}

static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
				struct ib_send_wr *wr, int atomic)
{
	struct mlx5_umr_wr *umrwr = umr_wr(wr);

	memset(umr, 0, sizeof(*umr));

	if (!umrwr->ignore_free_state) {
		if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
			/* fail if free */
			umr->flags = MLX5_UMR_CHECK_FREE;
		else
			/* fail if not free */
			umr->flags = MLX5_UMR_CHECK_NOT_FREE;
	}

	umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size));
	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) {
		u64 offset = get_xlt_octo(umrwr->offset);

		umr->xlt_offset = cpu_to_be16(offset & 0xffff);
		umr->xlt_offset_47_16 = cpu_to_be32(offset >> 16);
		umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
	}
	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION)
		umr->mkey_mask |= get_umr_update_translation_mask();
	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS) {
		umr->mkey_mask |= get_umr_update_access_mask(atomic);
		umr->mkey_mask |= get_umr_update_pd_mask();
	}
	if (wr->send_flags & MLX5_IB_SEND_UMR_ENABLE_MR)
		umr->mkey_mask |= get_umr_enable_mr_mask();
	if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
		umr->mkey_mask |= get_umr_disable_mr_mask();

	if (!wr->num_sge)
		umr->flags |= MLX5_UMR_INLINE;
}

static u8 get_umr_flags(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
		MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN;
}

static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg,
			     struct mlx5_ib_mr *mr,
			     u32 key, int access)
{
	int ndescs = ALIGN(mr->ndescs, 8) >> 1;

	memset(seg, 0, sizeof(*seg));

	if (mr->access_mode == MLX5_MKC_ACCESS_MODE_MTT)
		seg->log2_page_size = ilog2(mr->ibmr.page_size);
	else if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
		/* KLMs take twice the size of MTTs */
		ndescs *= 2;

	seg->flags = get_umr_flags(access) | mr->access_mode;
	seg->qpn_mkey7_0 = cpu_to_be32((key & 0xff) | 0xffffff00);
	seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
	seg->start_addr = cpu_to_be64(mr->ibmr.iova);
	seg->len = cpu_to_be64(mr->ibmr.length);
	seg->xlt_oct_size = cpu_to_be32(ndescs);
}

static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg)
{
	memset(seg, 0, sizeof(*seg));
	seg->status = MLX5_MKEY_STATUS_FREE;
}

static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr)
{
	struct mlx5_umr_wr *umrwr = umr_wr(wr);

	memset(seg, 0, sizeof(*seg));
	if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
		seg->status = MLX5_MKEY_STATUS_FREE;

	seg->flags = convert_access(umrwr->access_flags);
	if (umrwr->pd)
		seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION &&
	    !umrwr->length)
		seg->flags_pd |= cpu_to_be32(MLX5_MKEY_LEN64);

	seg->start_addr = cpu_to_be64(umrwr->virt_addr);
	seg->len = cpu_to_be64(umrwr->length);
	seg->log2_page_size = umrwr->page_shift;
	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
				       mlx5_mkey_variant(umrwr->mkey));
}

static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg,
			     struct mlx5_ib_mr *mr,
			     struct mlx5_ib_pd *pd)
{
	int bcount = mr->desc_size * mr->ndescs;

	dseg->addr = cpu_to_be64(mr->desc_map);
	dseg->byte_count = cpu_to_be32(ALIGN(bcount, 64));
	dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
}

static __be32 send_ieth(struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return wr->ex.imm_data;

	case IB_WR_SEND_WITH_INV:
		return cpu_to_be32(wr->ex.invalidate_rkey);

	default:
		return 0;
	}
}

static u8 calc_sig(void *wqe, int size)
{
	u8 *p = wqe;
	u8 res = 0;
	int i;

	for (i = 0; i < size; i++)
		res ^= p[i];

	return ~res;
}

static u8 wq_sig(void *wqe)
{
	return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
}

static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr,
			    void *wqe, int *sz)
{
	struct mlx5_wqe_inline_seg *seg;
	void *qend = qp->sq.qend;
	void *addr;
	int inl = 0;
	int copy;
	int len;
	int i;

	seg = wqe;
	wqe += sizeof(*seg);
	for (i = 0; i < wr->num_sge; i++) {
		addr = (void *)(unsigned long)(wr->sg_list[i].addr);
		len  = wr->sg_list[i].length;
		inl += len;

		if (unlikely(inl > qp->max_inline_data))
			return -ENOMEM;

		if (unlikely(wqe + len > qend)) {
			copy = qend - wqe;
			memcpy(wqe, addr, copy);
			addr += copy;
			len -= copy;
			wqe = mlx5_get_send_wqe(qp, 0);
		}
		memcpy(wqe, addr, len);
		wqe += len;
	}

	seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);

	*sz = ALIGN(inl + sizeof(seg->byte_count), 16) / 16;

	return 0;
}

static u16 prot_field_size(enum ib_signature_type type)
{
	switch (type) {
	case IB_SIG_TYPE_T10_DIF:
		return MLX5_DIF_SIZE;
	default:
		return 0;
	}
}

static u8 bs_selector(int block_size)
{
	switch (block_size) {
	case 512:	    return 0x1;
	case 520:	    return 0x2;
	case 4096:	    return 0x3;
	case 4160:	    return 0x4;
	case 1073741824:    return 0x5;
	default:	    return 0;
	}
}

static void mlx5_fill_inl_bsf(struct ib_sig_domain *domain,
			      struct mlx5_bsf_inl *inl)
{
	/* Valid inline section and allow BSF refresh */
	inl->vld_refresh = cpu_to_be16(MLX5_BSF_INL_VALID |
				       MLX5_BSF_REFRESH_DIF);
	inl->dif_apptag = cpu_to_be16(domain->sig.dif.app_tag);
	inl->dif_reftag = cpu_to_be32(domain->sig.dif.ref_tag);
	/* repeating block */
	inl->rp_inv_seed = MLX5_BSF_REPEAT_BLOCK;
	inl->sig_type = domain->sig.dif.bg_type == IB_T10DIF_CRC ?
			MLX5_DIF_CRC : MLX5_DIF_IPCS;

	if (domain->sig.dif.ref_remap)
		inl->dif_inc_ref_guard_check |= MLX5_BSF_INC_REFTAG;

	if (domain->sig.dif.app_escape) {
		if (domain->sig.dif.ref_escape)
			inl->dif_inc_ref_guard_check |= MLX5_BSF_APPREF_ESCAPE;
		else
			inl->dif_inc_ref_guard_check |= MLX5_BSF_APPTAG_ESCAPE;
	}

	inl->dif_app_bitmask_check =
		cpu_to_be16(domain->sig.dif.apptag_check_mask);
}

static int mlx5_set_bsf(struct ib_mr *sig_mr,
			struct ib_sig_attrs *sig_attrs,
			struct mlx5_bsf *bsf, u32 data_size)
{
	struct mlx5_core_sig_ctx *msig = to_mmr(sig_mr)->sig;
	struct mlx5_bsf_basic *basic = &bsf->basic;
	struct ib_sig_domain *mem = &sig_attrs->mem;
	struct ib_sig_domain *wire = &sig_attrs->wire;

	memset(bsf, 0, sizeof(*bsf));

	/* Basic + Extended + Inline */
	basic->bsf_size_sbs = 1 << 7;
	/* Input domain check byte mask */
	basic->check_byte_mask = sig_attrs->check_mask;
	basic->raw_data_size = cpu_to_be32(data_size);

	/* Memory domain */
	switch (sig_attrs->mem.sig_type) {
	case IB_SIG_TYPE_NONE:
		break;
	case IB_SIG_TYPE_T10_DIF:
		basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval);
		basic->m_bfs_psv = cpu_to_be32(msig->psv_memory.psv_idx);
		mlx5_fill_inl_bsf(mem, &bsf->m_inl);
		break;
	default:
		return -EINVAL;
	}

	/* Wire domain */
	switch (sig_attrs->wire.sig_type) {
	case IB_SIG_TYPE_NONE:
		break;
	case IB_SIG_TYPE_T10_DIF:
		if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval &&
		    mem->sig_type == wire->sig_type) {
			/* Same block structure */
			basic->bsf_size_sbs |= 1 << 4;
			if (mem->sig.dif.bg_type == wire->sig.dif.bg_type)
				basic->wire.copy_byte_mask |= MLX5_CPY_GRD_MASK;
			if (mem->sig.dif.app_tag == wire->sig.dif.app_tag)
				basic->wire.copy_byte_mask |= MLX5_CPY_APP_MASK;
			if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag)
				basic->wire.copy_byte_mask |= MLX5_CPY_REF_MASK;
		} else
			basic->wire.bs_selector = bs_selector(wire->sig.dif.pi_interval);

		basic->w_bfs_psv = cpu_to_be32(msig->psv_wire.psv_idx);
		mlx5_fill_inl_bsf(wire, &bsf->w_inl);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int set_sig_data_segment(struct ib_sig_handover_wr *wr,
				struct mlx5_ib_qp *qp, void **seg, int *size)
{
	struct ib_sig_attrs *sig_attrs = wr->sig_attrs;
	struct ib_mr *sig_mr = wr->sig_mr;
	struct mlx5_bsf *bsf;
	u32 data_len = wr->wr.sg_list->length;
	u32 data_key = wr->wr.sg_list->lkey;
	u64 data_va = wr->wr.sg_list->addr;
	int ret;
	int wqe_size;

	if (!wr->prot ||
	    (data_key == wr->prot->lkey &&
	     data_va == wr->prot->addr &&
	     data_len == wr->prot->length)) {
		/**
		 * Source domain doesn't contain signature information
		 * or data and protection are interleaved in memory.
		 * So need construct:
		 *                  ------------------
		 *                 |     data_klm     |
		 *                  ------------------
		 *                 |       BSF        |
		 *                  ------------------
		 **/
		struct mlx5_klm *data_klm = *seg;

		data_klm->bcount = cpu_to_be32(data_len);
		data_klm->key = cpu_to_be32(data_key);
		data_klm->va = cpu_to_be64(data_va);
		wqe_size = ALIGN(sizeof(*data_klm), 64);
	} else {
		/**
		 * Source domain contains signature information
		 * So need construct a strided block format:
		 *               ---------------------------
		 *              |     stride_block_ctrl     |
		 *               ---------------------------
		 *              |          data_klm         |
		 *               ---------------------------
		 *              |          prot_klm         |
		 *               ---------------------------
		 *              |             BSF           |
		 *               ---------------------------
		 **/
		struct mlx5_stride_block_ctrl_seg *sblock_ctrl;
		struct mlx5_stride_block_entry *data_sentry;
		struct mlx5_stride_block_entry *prot_sentry;
		u32 prot_key = wr->prot->lkey;
		u64 prot_va = wr->prot->addr;
		u16 block_size = sig_attrs->mem.sig.dif.pi_interval;
		int prot_size;

		sblock_ctrl = *seg;
		data_sentry = (void *)sblock_ctrl + sizeof(*sblock_ctrl);
		prot_sentry = (void *)data_sentry + sizeof(*data_sentry);

		prot_size = prot_field_size(sig_attrs->mem.sig_type);
		if (!prot_size) {
			pr_err("Bad block size given: %u\n", block_size);
			return -EINVAL;
		}
		sblock_ctrl->bcount_per_cycle = cpu_to_be32(block_size +
							    prot_size);
		sblock_ctrl->op = cpu_to_be32(MLX5_STRIDE_BLOCK_OP);
		sblock_ctrl->repeat_count = cpu_to_be32(data_len / block_size);
		sblock_ctrl->num_entries = cpu_to_be16(2);

		data_sentry->bcount = cpu_to_be16(block_size);
		data_sentry->key = cpu_to_be32(data_key);
		data_sentry->va = cpu_to_be64(data_va);
		data_sentry->stride = cpu_to_be16(block_size);

		prot_sentry->bcount = cpu_to_be16(prot_size);
		prot_sentry->key = cpu_to_be32(prot_key);
		prot_sentry->va = cpu_to_be64(prot_va);
		prot_sentry->stride = cpu_to_be16(prot_size);

		wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) +
				 sizeof(*prot_sentry), 64);
	}

	*seg += wqe_size;
	*size += wqe_size / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	bsf = *seg;
	ret = mlx5_set_bsf(sig_mr, sig_attrs, bsf, data_len);
	if (ret)
		return -EINVAL;

	*seg += sizeof(*bsf);
	*size += sizeof(*bsf) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	return 0;
}

static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
				 struct ib_sig_handover_wr *wr, u32 size,
				 u32 length, u32 pdn)
{
	struct ib_mr *sig_mr = wr->sig_mr;
	u32 sig_key = sig_mr->rkey;
	u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1;

	memset(seg, 0, sizeof(*seg));

	seg->flags = get_umr_flags(wr->access_flags) |
		     MLX5_MKC_ACCESS_MODE_KLMS;
	seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
	seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
				    MLX5_MKEY_BSF_EN | pdn);
	seg->len = cpu_to_be64(length);
	seg->xlt_oct_size = cpu_to_be32(get_xlt_octo(size));
	seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
}

static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
				u32 size)
{
	memset(umr, 0, sizeof(*umr));

	umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE;
	umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
	umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE);
	umr->mkey_mask = sig_mkey_mask();
}

static int set_sig_umr_wr(struct ib_send_wr *send_wr, struct mlx5_ib_qp *qp,
			  void **seg, int *size)
{
	struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr);
	struct mlx5_ib_mr *sig_mr = to_mmr(wr->sig_mr);
	u32 pdn = get_pd(qp)->pdn;
	u32 xlt_size;
	int region_len, ret;

	if (unlikely(wr->wr.num_sge != 1) ||
	    unlikely(wr->access_flags & IB_ACCESS_REMOTE_ATOMIC) ||
	    unlikely(!sig_mr->sig) || unlikely(!qp->signature_en) ||
	    unlikely(!sig_mr->sig->sig_status_checked))
		return -EINVAL;

	/* length of the protected region, data + protection */
	region_len = wr->wr.sg_list->length;
	if (wr->prot &&
	    (wr->prot->lkey != wr->wr.sg_list->lkey  ||
	     wr->prot->addr != wr->wr.sg_list->addr  ||
	     wr->prot->length != wr->wr.sg_list->length))
		region_len += wr->prot->length;

	/**
	 * KLM octoword size - if protection was provided
	 * then we use strided block format (3 octowords),
	 * else we use single KLM (1 octoword)
	 **/
	xlt_size = wr->prot ? 0x30 : sizeof(struct mlx5_klm);

	set_sig_umr_segment(*seg, xlt_size);
	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	set_sig_mkey_segment(*seg, wr, xlt_size, region_len, pdn);
	*seg += sizeof(struct mlx5_mkey_seg);
	*size += sizeof(struct mlx5_mkey_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	ret = set_sig_data_segment(wr, qp, seg, size);
	if (ret)
		return ret;

	sig_mr->sig->sig_status_checked = false;
	return 0;
}

static int set_psv_wr(struct ib_sig_domain *domain,
		      u32 psv_idx, void **seg, int *size)
{
	struct mlx5_seg_set_psv *psv_seg = *seg;

	memset(psv_seg, 0, sizeof(*psv_seg));
	psv_seg->psv_num = cpu_to_be32(psv_idx);
	switch (domain->sig_type) {
	case IB_SIG_TYPE_NONE:
		break;
	case IB_SIG_TYPE_T10_DIF:
		psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 |
						     domain->sig.dif.app_tag);
		psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag);
		break;
	default:
		pr_err("Bad signature type (%d) is given.\n",
		       domain->sig_type);
		return -EINVAL;
	}

	*seg += sizeof(*psv_seg);
	*size += sizeof(*psv_seg) / 16;

	return 0;
}

static int set_reg_wr(struct mlx5_ib_qp *qp,
		      struct ib_reg_wr *wr,
		      void **seg, int *size)
{
	struct mlx5_ib_mr *mr = to_mmr(wr->mr);
	struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);

	if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
		mlx5_ib_warn(to_mdev(qp->ibqp.device),
			     "Invalid IB_SEND_INLINE send flag\n");
		return -EINVAL;
	}

	set_reg_umr_seg(*seg, mr);
	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	set_reg_mkey_seg(*seg, mr, wr->key, wr->access);
	*seg += sizeof(struct mlx5_mkey_seg);
	*size += sizeof(struct mlx5_mkey_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	set_reg_data_seg(*seg, mr, pd);
	*seg += sizeof(struct mlx5_wqe_data_seg);
	*size += (sizeof(struct mlx5_wqe_data_seg) / 16);

	return 0;
}

static void set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size)
{
	set_linv_umr_seg(*seg);
	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);
	set_linv_mkey_seg(*seg);
	*seg += sizeof(struct mlx5_mkey_seg);
	*size += sizeof(struct mlx5_mkey_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);
}

static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
{
	__be32 *p = NULL;
	int tidx = idx;
	int i, j;

	pr_debug("dump wqe at %p\n", mlx5_get_send_wqe(qp, tidx));
	for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
		if ((i & 0xf) == 0) {
			void *buf = mlx5_get_send_wqe(qp, tidx);
			tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1);
			p = buf;
			j = 0;
		}
		pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
			 be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
			 be32_to_cpu(p[j + 3]));
	}
}

static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
		     struct mlx5_wqe_ctrl_seg **ctrl,
		     struct ib_send_wr *wr, unsigned *idx,
		     int *size, int nreq)
{
	if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))
		return -ENOMEM;

	*idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
	*seg = mlx5_get_send_wqe(qp, *idx);
	*ctrl = *seg;
	*(uint32_t *)(*seg + 8) = 0;
	(*ctrl)->imm = send_ieth(wr);
	(*ctrl)->fm_ce_se = qp->sq_signal_bits |
		(wr->send_flags & IB_SEND_SIGNALED ?
		 MLX5_WQE_CTRL_CQ_UPDATE : 0) |
		(wr->send_flags & IB_SEND_SOLICITED ?
		 MLX5_WQE_CTRL_SOLICITED : 0);

	*seg += sizeof(**ctrl);
	*size = sizeof(**ctrl) / 16;

	return 0;
}

*qp
,
3908 struct mlx5_wqe_ctrl_seg
*ctrl
,
3909 u8 size
, unsigned idx
, u64 wr_id
,
3910 int nreq
, u8 fence
, u32 mlx5_opcode
)
3914 ctrl
->opmod_idx_opcode
= cpu_to_be32(((u32
)(qp
->sq
.cur_post
) << 8) |
3915 mlx5_opcode
| ((u32
)opmod
<< 24));
3916 ctrl
->qpn_ds
= cpu_to_be32(size
| (qp
->trans_qp
.base
.mqp
.qpn
<< 8));
3917 ctrl
->fm_ce_se
|= fence
;
3918 if (unlikely(qp
->wq_sig
))
3919 ctrl
->signature
= wq_sig(ctrl
);
3921 qp
->sq
.wrid
[idx
] = wr_id
;
3922 qp
->sq
.w_list
[idx
].opcode
= mlx5_opcode
;
3923 qp
->sq
.wqe_head
[idx
] = qp
->sq
.head
+ nreq
;
3924 qp
->sq
.cur_post
+= DIV_ROUND_UP(size
* 16, MLX5_SEND_WQE_BB
);
3925 qp
->sq
.w_list
[idx
].next
= qp
->sq
.cur_post
;
int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr)
{
	struct mlx5_wqe_ctrl_seg *ctrl = NULL;	/* compiler warning */
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_qp *qp;
	struct mlx5_ib_mr *mr;
	struct mlx5_wqe_data_seg *dpseg;
	struct mlx5_wqe_xrc_seg *xrc;
	struct mlx5_bf *bf;
	int uninitialized_var(size);
	void *qend;
	unsigned long flags;
	unsigned idx;
	int err = 0;
	int num_sge;
	void *seg;
	int nreq;
	int i;
	u8 next_fence = 0;
	u8 fence;

	if (unlikely(ibqp->qp_type == IB_QPT_GSI))
		return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr);

	qp = to_mqp(ibqp);
	bf = &qp->bf;
	qend = qp->sq.qend;

	spin_lock_irqsave(&qp->sq.lock, flags);

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		err = -EIO;
		*bad_wr = wr;
		nreq = 0;
		goto out;
	}

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
			mlx5_ib_warn(dev, "\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		num_sge = wr->num_sge;
		if (unlikely(num_sge > qp->sq.max_gs)) {
			mlx5_ib_warn(dev, "\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq);
		if (err) {
			mlx5_ib_warn(dev, "\n");
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (wr->opcode == IB_WR_REG_MR) {
			fence = dev->umr_fence;
			next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
		} else  {
			if (wr->send_flags & IB_SEND_FENCE) {
				if (qp->next_fence)
					fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
				else
					fence = MLX5_FENCE_MODE_FENCE;
			} else {
				fence = qp->next_fence;
			}
		}

		switch (ibqp->qp_type) {
		case IB_QPT_XRC_INI:
			xrc = seg;
			seg += sizeof(*xrc);
			size += sizeof(*xrc) / 16;
			/* fall through */
		case IB_QPT_RC:
			switch (wr->opcode) {
			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(seg, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				seg += sizeof(struct mlx5_wqe_raddr_seg);
				size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
				break;

			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
			case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
				mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
				err = -ENOSYS;
				*bad_wr = wr;
				goto out;

			case IB_WR_LOCAL_INV:
				qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
				ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
				set_linv_wr(qp, &seg, &size);
				num_sge = 0;
				break;

			case IB_WR_REG_MR:
				qp->sq.wr_data[idx] = IB_WR_REG_MR;
				ctrl->imm = cpu_to_be32(reg_wr(wr)->key);
				err = set_reg_wr(qp, reg_wr(wr), &seg, &size);
				if (err) {
					*bad_wr = wr;
					goto out;
				}
				num_sge = 0;
				break;

			case IB_WR_REG_SIG_MR:
				qp->sq.wr_data[idx] = IB_WR_REG_SIG_MR;
				mr = to_mmr(sig_handover_wr(wr)->sig_mr);

				ctrl->imm = cpu_to_be32(mr->ibmr.rkey);
				err = set_sig_umr_wr(wr, qp, &seg, &size);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					*bad_wr = wr;
					goto out;
				}

				finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
					   fence, MLX5_OPCODE_UMR);
				/*
				 * SET_PSV WQEs are not signaled and solicited
				 * on error
				 */
				wr->send_flags &= ~IB_SEND_SIGNALED;
				wr->send_flags |= IB_SEND_SOLICITED;
				err = begin_wqe(qp, &seg, &ctrl, wr,
						&idx, &size, nreq);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					err = -ENOMEM;
					*bad_wr = wr;
					goto out;
				}

				err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->mem,
						 mr->sig->psv_memory.psv_idx, &seg,
						 &size);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					*bad_wr = wr;
					goto out;
				}

				finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
					   fence, MLX5_OPCODE_SET_PSV);
				err = begin_wqe(qp, &seg, &ctrl, wr,
						&idx, &size, nreq);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					err = -ENOMEM;
					*bad_wr = wr;
					goto out;
				}

				err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->wire,
						 mr->sig->psv_wire.psv_idx, &seg,
						 &size);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					*bad_wr = wr;
					goto out;
				}

				finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
					   fence, MLX5_OPCODE_SET_PSV);
				qp->next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
				num_sge = 0;
				goto skip_psv;

			default:
				break;
			}
			break;

		case IB_QPT_UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(seg, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				seg += sizeof(struct mlx5_wqe_raddr_seg);
				size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
				break;

			default:
				break;
			}
			break;

		case IB_QPT_SMI:
			if (unlikely(!mdev->port_caps[qp->port - 1].has_smi)) {
				mlx5_ib_warn(dev, "Send SMP MADs is not allowed\n");
				err = -EPERM;
				*bad_wr = wr;
				goto out;
			}
			/* fall through */
		case MLX5_IB_QPT_HW_GSI:
			set_datagram_seg(seg, wr);
			seg += sizeof(struct mlx5_wqe_datagram_seg);
			size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
			if (unlikely((seg == qend)))
				seg = mlx5_get_send_wqe(qp, 0);
			break;
		case IB_QPT_UD:
			set_datagram_seg(seg, wr);
			seg += sizeof(struct mlx5_wqe_datagram_seg);
			size += sizeof(struct mlx5_wqe_datagram_seg) / 16;

			if (unlikely((seg == qend)))
				seg = mlx5_get_send_wqe(qp, 0);

			/* handle qp that supports ud offload */
			if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO) {
				struct mlx5_wqe_eth_pad *pad;

				pad = seg;
				memset(pad, 0, sizeof(struct mlx5_wqe_eth_pad));
				seg += sizeof(struct mlx5_wqe_eth_pad);
				size += sizeof(struct mlx5_wqe_eth_pad) / 16;

				seg = set_eth_seg(seg, wr, qend, qp, &size);

				if (unlikely((seg == qend)))
					seg = mlx5_get_send_wqe(qp, 0);
			}
			break;
		case MLX5_IB_QPT_REG_UMR:
			if (wr->opcode != MLX5_IB_WR_UMR) {
				err = -EINVAL;
				mlx5_ib_warn(dev, "bad opcode\n");
				goto out;
			}
			qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
			ctrl->imm = cpu_to_be32(umr_wr(wr)->mkey);
			set_reg_umr_segment(seg, wr, !!(MLX5_CAP_GEN(mdev, atomic)));
			seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
			size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
			if (unlikely((seg == qend)))
				seg = mlx5_get_send_wqe(qp, 0);
			set_reg_mkey_segment(seg, wr);
			seg += sizeof(struct mlx5_mkey_seg);
			size += sizeof(struct mlx5_mkey_seg) / 16;
			if (unlikely((seg == qend)))
				seg = mlx5_get_send_wqe(qp, 0);
			break;

		default:
			break;
		}

		if (wr->send_flags & IB_SEND_INLINE && num_sge) {
			int uninitialized_var(sz);

			err = set_data_inl_seg(qp, wr, seg, &sz);
			if (unlikely(err)) {
				mlx5_ib_warn(dev, "\n");
				*bad_wr = wr;
				goto out;
			}
			size += sz;
		} else {
			dpseg = seg;
			for (i = 0; i < num_sge; i++) {
				if (unlikely(dpseg == qend)) {
					seg = mlx5_get_send_wqe(qp, 0);
					dpseg = seg;
				}
				if (likely(wr->sg_list[i].length)) {
					set_data_ptr_seg(dpseg, wr->sg_list + i);
					size += sizeof(struct mlx5_wqe_data_seg) / 16;
					dpseg++;
				}
			}
		}

		qp->next_fence = next_fence;
		finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, fence,
			   mlx5_ib_opcode[wr->opcode]);
skip_psv:
		if (0)
			dump_wqe(qp, idx, size);
	}

out:
	if (likely(nreq)) {
		qp->sq.head += nreq;

		/* Make sure that descriptors are written before
		 * updating doorbell record and ringing the doorbell
		 */
		wmb();

		qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);

		/* Make sure doorbell record is visible to the HCA before
		 * we hit doorbell */
		wmb();

		/* currently we support only regular doorbells */
		mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset, NULL);
		/* Make sure doorbells don't leak out of SQ spinlock
		 * and reach the HCA out of order.
		 */
		mmiowb();
		bf->offset ^= bf->buf_size;
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return err;
}

static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)
{
	sig->signature = calc_sig(sig, size);
}

int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_wqe_data_seg *scat;
	struct mlx5_rwqe_sig *sig;
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_core_dev *mdev = dev->mdev;
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int i;

	if (unlikely(ibqp->qp_type == IB_QPT_GSI))
		return mlx5_ib_gsi_post_recv(ibqp, wr, bad_wr);

	spin_lock_irqsave(&qp->rq.lock, flags);

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		err = -EIO;
		*bad_wr = wr;
		nreq = 0;
		goto out;
	}

	ind = qp->rq.head & (qp->rq.wqe_cnt - 1);

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		scat = get_recv_wqe(qp, ind);
		if (qp->wq_sig)
			scat++;

		for (i = 0; i < wr->num_sge; i++)
			set_data_ptr_seg(scat + i, wr->sg_list + i);

		if (i < qp->rq.max_gs) {
			scat[i].byte_count = 0;
			scat[i].lkey       = cpu_to_be32(MLX5_INVALID_LKEY);
			scat[i].addr       = 0;
		}

		if (qp->wq_sig) {
			sig = (struct mlx5_rwqe_sig *)scat;
			set_sig_seg(sig, (qp->rq.max_gs + 1) << 2);
		}

		qp->rq.wrid[ind] = wr->wr_id;

		ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
	}

out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/* Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);

	return err;
}

static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
{
	switch (mlx5_state) {
	case MLX5_QP_STATE_RST:      return IB_QPS_RESET;
	case MLX5_QP_STATE_INIT:     return IB_QPS_INIT;
	case MLX5_QP_STATE_RTR:      return IB_QPS_RTR;
	case MLX5_QP_STATE_RTS:      return IB_QPS_RTS;
	case MLX5_QP_STATE_SQ_DRAINING:
	case MLX5_QP_STATE_SQD:      return IB_QPS_SQD;
	case MLX5_QP_STATE_SQER:     return IB_QPS_SQE;
	case MLX5_QP_STATE_ERR:      return IB_QPS_ERR;
	default:		     return -1;
	}
}

static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state)
{
	switch (mlx5_mig_state) {
	case MLX5_QP_PM_ARMED:		return IB_MIG_ARMED;
	case MLX5_QP_PM_REARM:		return IB_MIG_REARM;
	case MLX5_QP_PM_MIGRATED:	return IB_MIG_MIGRATED;
	default:			return -1;
	}
}

static int to_ib_qp_access_flags(int mlx5_flags)
{
	int ib_flags = 0;

	if (mlx5_flags & MLX5_QP_BIT_RRE)
		ib_flags |= IB_ACCESS_REMOTE_READ;
	if (mlx5_flags & MLX5_QP_BIT_RWE)
		ib_flags |= IB_ACCESS_REMOTE_WRITE;
	if (mlx5_flags & MLX5_QP_BIT_RAE)
		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

	return ib_flags;
}

static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
			    struct rdma_ah_attr *ah_attr,
			    struct mlx5_qp_path *path)
{
	struct mlx5_core_dev *dev = ibdev->mdev;

	memset(ah_attr, 0, sizeof(*ah_attr));

	if (!path->port || path->port > MLX5_CAP_GEN(dev, num_ports))
		return;

	ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, path->port);

	rdma_ah_set_port_num(ah_attr, path->port);
	rdma_ah_set_sl(ah_attr, path->dci_cfi_prio_sl & 0xf);

	rdma_ah_set_dlid(ah_attr, be16_to_cpu(path->rlid));
	rdma_ah_set_path_bits(ah_attr, path->grh_mlid & 0x7f);
	rdma_ah_set_static_rate(ah_attr,
				path->static_rate ? path->static_rate - 5 : 0);
	if (path->grh_mlid & (1 << 7)) {
		u32 tc_fl = be32_to_cpu(path->tclass_flowlabel);

		rdma_ah_set_grh(ah_attr, NULL,
				tc_fl & 0xfffff,
				path->mgid_index,
				path->hop_limit,
				(tc_fl >> 20) & 0xff);
		rdma_ah_set_dgid_raw(ah_attr, path->rgid);
	}
}

static int query_raw_packet_qp_sq_state(struct mlx5_ib_dev *dev,
					struct mlx5_ib_sq *sq,
					u8 *sq_state)
{
	void *out;
	void *sqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(query_sq_out);
	out = kvzalloc(inlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_core_query_sq(dev->mdev, sq->base.mqp.qpn, out);
	if (err)
		goto out;

	sqc = MLX5_ADDR_OF(query_sq_out, out, sq_context);
	*sq_state = MLX5_GET(sqc, sqc, state);
	sq->state = *sq_state;

out:
	kvfree(out);
	return err;
}

static int query_raw_packet_qp_rq_state(struct mlx5_ib_dev *dev,
					struct mlx5_ib_rq *rq,
					u8 *rq_state)
{
	void *out;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(query_rq_out);
	out = kvzalloc(inlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_core_query_rq(dev->mdev, rq->base.mqp.qpn, out);
	if (err)
		goto out;

	rqc = MLX5_ADDR_OF(query_rq_out, out, rq_context);
	*rq_state = MLX5_GET(rqc, rqc, state);
	rq->state = *rq_state;

out:
	kvfree(out);
	return err;
}

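/*
 * Fold the separately queried SQ and RQ states into one IB QP state.
 * In the table below, MLX5_QP_STATE means "no new state": keep whatever
 * state the driver last moved the QP to. MLX5_QP_STATE_BAD marks
 * combinations a sane device should never report (e.g. RQ ready while
 * the SQ is still in reset). As a concrete example, RQ=RDY with SQ=ERR
 * resolves to IB_QPS_SQE.
 */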
static int sqrq_state_to_qp_state(u8 sq_state, u8 rq_state,
				  struct mlx5_ib_qp *qp, u8 *qp_state)
{
	static const u8 sqrq_trans[MLX5_RQ_NUM_STATE][MLX5_SQ_NUM_STATE] = {
		[MLX5_RQC_STATE_RST] = {
			[MLX5_SQC_STATE_RST]	= IB_QPS_RESET,
			[MLX5_SQC_STATE_RDY]	= MLX5_QP_STATE_BAD,
			[MLX5_SQC_STATE_ERR]	= MLX5_QP_STATE_BAD,
			[MLX5_SQ_STATE_NA]	= IB_QPS_RESET,
		},
		[MLX5_RQC_STATE_RDY] = {
			[MLX5_SQC_STATE_RST]	= MLX5_QP_STATE_BAD,
			[MLX5_SQC_STATE_RDY]	= MLX5_QP_STATE,
			[MLX5_SQC_STATE_ERR]	= IB_QPS_SQE,
			[MLX5_SQ_STATE_NA]	= MLX5_QP_STATE,
		},
		[MLX5_RQC_STATE_ERR] = {
			[MLX5_SQC_STATE_RST]	= MLX5_QP_STATE_BAD,
			[MLX5_SQC_STATE_RDY]	= MLX5_QP_STATE_BAD,
			[MLX5_SQC_STATE_ERR]	= IB_QPS_ERR,
			[MLX5_SQ_STATE_NA]	= IB_QPS_ERR,
		},
		[MLX5_RQ_STATE_NA] = {
			[MLX5_SQC_STATE_RST]	= IB_QPS_RESET,
			[MLX5_SQC_STATE_RDY]	= MLX5_QP_STATE,
			[MLX5_SQC_STATE_ERR]	= MLX5_QP_STATE,
			[MLX5_SQ_STATE_NA]	= MLX5_QP_STATE_BAD,
		},
	};

	*qp_state = sqrq_trans[rq_state][sq_state];

	if (*qp_state == MLX5_QP_STATE_BAD) {
		WARN(1, "Buggy Raw Packet QP state, SQ 0x%x state: 0x%x, RQ 0x%x state: 0x%x",
		     qp->raw_packet_qp.sq.base.mqp.qpn, sq_state,
		     qp->raw_packet_qp.rq.base.mqp.qpn, rq_state);
		return -EINVAL;
	}

	if (*qp_state == MLX5_QP_STATE)
		*qp_state = qp->state;

	return 0;
}

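/*
 * A raw packet QP may have been created without an SQ or without an RQ.
 * A missing side is reported as MLX5_SQ_STATE_NA/MLX5_RQ_STATE_NA so
 * that the translation table above can still resolve a QP state.
 */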
static int query_raw_packet_qp_state(struct mlx5_ib_dev *dev,
				     struct mlx5_ib_qp *qp,
				     u8 *raw_packet_qp_state)
{
	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
	int err;
	u8 sq_state = MLX5_SQ_STATE_NA;
	u8 rq_state = MLX5_RQ_STATE_NA;

	if (qp->sq.wqe_cnt) {
		err = query_raw_packet_qp_sq_state(dev, sq, &sq_state);
		if (err)
			return err;
	}

	if (qp->rq.wqe_cnt) {
		err = query_raw_packet_qp_rq_state(dev, rq, &rq_state);
		if (err)
			return err;
	}

	return sqrq_state_to_qp_state(sq_state, rq_state, qp,
				      raw_packet_qp_state);
}

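/*
 * Fill *qp_attr for a regular (non raw packet) QP by reading its QP
 * context from firmware. The shifts and masks below follow the
 * mlx5_qp_context layout (big-endian fields packing PSNs, retry counts,
 * the RNR timer and friends into shared words).
 */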
static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
			 struct ib_qp_attr *qp_attr)
{
	int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
	struct mlx5_qp_context *context;
	int mlx5_state;
	u32 *outb;
	int err = 0;

	outb = kzalloc(outlen, GFP_KERNEL);
	if (!outb)
		return -ENOMEM;

	err = mlx5_core_qp_query(dev->mdev, &qp->trans_qp.base.mqp, outb,
				 outlen);
	if (err)
		goto out;

	/* FIXME: use MLX5_GET rather than mlx5_qp_context manual struct */
	context = (struct mlx5_qp_context *)MLX5_ADDR_OF(query_qp_out, outb, qpc);

	mlx5_state = be32_to_cpu(context->flags) >> 28;

	qp->state		     = to_ib_qp_state(mlx5_state);
	qp_attr->path_mtu	     = context->mtu_msgmax >> 5;
	qp_attr->path_mig_state	     =
		to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
	qp_attr->qkey		     = be32_to_cpu(context->qkey);
	qp_attr->rq_psn		     = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
	qp_attr->sq_psn		     = be32_to_cpu(context->next_send_psn) & 0xffffff;
	qp_attr->dest_qp_num	     = be32_to_cpu(context->log_pg_sz_remote_qpn) & 0xffffff;
	qp_attr->qp_access_flags     =
		to_ib_qp_access_flags(be32_to_cpu(context->params2));

	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
		to_rdma_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
		to_rdma_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
		qp_attr->alt_pkey_index =
			be16_to_cpu(context->alt_path.pkey_index);
		qp_attr->alt_port_num	=
			rdma_ah_get_port_num(&qp_attr->alt_ah_attr);
	}

	qp_attr->pkey_index = be16_to_cpu(context->pri_path.pkey_index);
	qp_attr->port_num = context->pri_path.port;

	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
	qp_attr->sq_draining = mlx5_state == MLX5_QP_STATE_SQ_DRAINING;

	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);

	qp_attr->max_dest_rd_atomic =
		1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
	qp_attr->min_rnr_timer	    =
		(be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
	qp_attr->timeout	    = context->pri_path.ackto_lt >> 3;
	qp_attr->retry_cnt	    = (be32_to_cpu(context->params1) >> 16) & 0x7;
	qp_attr->rnr_retry	    = (be32_to_cpu(context->params1) >> 13) & 0x7;
	qp_attr->alt_timeout	    = context->alt_path.ackto_lt >> 3;

out:
	kfree(outb);
	return err;
}

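/*
 * mlx5_ib_query_qp() backs the ib_query_qp() verb. A minimal usage
 * sketch from a hypothetical kernel ULP (the recover_qp() helper is
 * illustrative, not part of this driver):
 *
 *	struct ib_qp_attr attr;
 *	struct ib_qp_init_attr init_attr;
 *
 *	if (!ib_query_qp(qp, &attr, IB_QP_STATE, &init_attr) &&
 *	    attr.qp_state == IB_QPS_ERR)
 *		recover_qp(qp);
 *
 * Note that qp_attr_mask is effectively advisory here: this
 * implementation fills every field it knows how to query.
 */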
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
		     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	int err = 0;
	u8 raw_packet_qp_state;

	if (ibqp->rwq_ind_tbl)
		return -ENOSYS;

	if (unlikely(ibqp->qp_type == IB_QPT_GSI))
		return mlx5_ib_gsi_query_qp(ibqp, qp_attr, qp_attr_mask,
					    qp_init_attr);

	/* Not all of output fields are applicable, make sure to zero them */
	memset(qp_init_attr, 0, sizeof(*qp_init_attr));
	memset(qp_attr, 0, sizeof(*qp_attr));

	mutex_lock(&qp->mutex);

	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
	    qp->flags & MLX5_IB_QP_UNDERLAY) {
		err = query_raw_packet_qp_state(dev, qp, &raw_packet_qp_state);
		if (err)
			goto out;
		qp->state = raw_packet_qp_state;
		qp_attr->port_num = 1;
	} else {
		err = query_qp_attr(dev, qp, qp_attr);
		if (err)
			goto out;
	}

	qp_attr->qp_state	     = qp->state;
	qp_attr->cur_qp_state	     = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr     = qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;

	if (!ibqp->uobject) {
		qp_attr->cap.max_send_wr  = qp->sq.max_post;
		qp_attr->cap.max_send_sge = qp->sq.max_gs;
		qp_init_attr->qp_context = ibqp->qp_context;
	} else {
		qp_attr->cap.max_send_wr  = 0;
		qp_attr->cap.max_send_sge = 0;
	}

	qp_init_attr->qp_type = ibqp->qp_type;
	qp_init_attr->recv_cq = ibqp->recv_cq;
	qp_init_attr->send_cq = ibqp->send_cq;
	qp_init_attr->srq = ibqp->srq;
	qp_attr->cap.max_inline_data = qp->max_inline_data;

	qp_init_attr->cap	     = qp_attr->cap;

	qp_init_attr->create_flags = 0;
	if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
		qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;

	if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
		qp_init_attr->create_flags |= IB_QP_CREATE_CROSS_CHANNEL;
	if (qp->flags & MLX5_IB_QP_MANAGED_SEND)
		qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_SEND;
	if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
		qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_RECV;
	if (qp->flags & MLX5_IB_QP_SQPN_QP1)
		qp_init_attr->create_flags |= mlx5_ib_create_qp_sqpn_qp1();

	qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
		IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;

out:
	mutex_unlock(&qp->mutex);
	return err;
}

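/*
 * XRC domain allocation only produces a device object (an xrcdn); there
 * is no mapped resource to hand back to userspace, so both directions
 * reduce to a single firmware command.
 */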
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_xrcd *xrcd;
	int err;

	if (!MLX5_CAP_GEN(dev->mdev, xrc))
		return ERR_PTR(-ENOSYS);

	xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
	if (!xrcd)
		return ERR_PTR(-ENOMEM);

	err = mlx5_core_xrcd_alloc(dev->mdev, &xrcd->xrcdn);
	if (err) {
		kfree(xrcd);
		return ERR_PTR(-ENOMEM);
	}

	return &xrcd->ibxrcd;
}

int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
	u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
	int err;

	err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn);
	if (err)
		mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);

	kfree(xrcd);
	return 0;
}

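/*
 * Dispatch firmware asynchronous WQ events to the consumer's event
 * handler. Only catastrophic WQ errors are translated; anything else is
 * unexpected and merely logged.
 */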
static void mlx5_ib_wq_event(struct mlx5_core_qp *core_qp, int type)
{
	struct mlx5_ib_rwq *rwq = to_mibrwq(core_qp);
	struct mlx5_ib_dev *dev = to_mdev(rwq->ibwq.device);
	struct ib_event event;

	if (rwq->ibwq.event_handler) {
		event.device     = rwq->ibwq.device;
		event.element.wq = &rwq->ibwq;
		switch (type) {
		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_WQ_FATAL;
			break;
		default:
			mlx5_ib_warn(dev, "Unexpected event type %d on WQ %06x\n", type, core_qp->qpn);
			return;
		}

		rwq->ibwq.event_handler(&event, rwq->ibwq.wq_context);
	}
}

static int set_delay_drop(struct mlx5_ib_dev *dev)
{
	int err = 0;

	mutex_lock(&dev->delay_drop.lock);
	if (dev->delay_drop.activate)
		goto out;

	err = mlx5_core_set_delay_drop(dev->mdev, dev->delay_drop.timeout);
	if (err)
		goto out;

	dev->delay_drop.activate = true;
out:
	mutex_unlock(&dev->delay_drop.lock);

	if (!err)
		atomic_inc(&dev->delay_drop.rqs_cnt);
	return err;
}

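/*
 * Build and execute the CREATE_RQ command for a user work queue. The WQ
 * buffer and doorbell page were already pinned by create_user_rq();
 * this encodes the RQ context (striding, VLAN stripping, FCS scatter,
 * delay drop) and hands the physical address list to firmware.
 */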
static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
		     struct ib_wq_init_attr *init_attr)
{
	struct mlx5_ib_dev *dev;
	int has_net_offloads;
	__be64 *rq_pas0;
	void *in;
	void *rqc;
	void *wq;
	int inlen;
	int err;

	dev = to_mdev(pd->device);

	inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rwq->rq_num_pas;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	MLX5_SET(rqc,  rqc, mem_rq_type,
		 MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
	MLX5_SET(rqc, rqc, user_index, rwq->user_index);
	MLX5_SET(rqc,  rqc, cqn, to_mcq(init_attr->cq)->mcq.cqn);
	MLX5_SET(rqc,  rqc, state, MLX5_RQC_STATE_RST);
	MLX5_SET(rqc,  rqc, flush_in_error_en, 1);
	wq = MLX5_ADDR_OF(rqc, rqc, wq);
	MLX5_SET(wq, wq, wq_type,
		 rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ ?
		 MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ : MLX5_WQ_TYPE_CYCLIC);
	if (init_attr->create_flags & IB_WQ_FLAGS_PCI_WRITE_END_PADDING) {
		if (!MLX5_CAP_GEN(dev->mdev, end_pad)) {
			mlx5_ib_dbg(dev, "Scatter end padding is not supported\n");
			err = -EOPNOTSUPP;
			goto out;
		} else {
			MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
		}
	}
	MLX5_SET(wq, wq, log_wq_stride, rwq->log_rq_stride);
	if (rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ) {
		MLX5_SET(wq, wq, two_byte_shift_en, rwq->two_byte_shift_en);
		MLX5_SET(wq, wq, log_wqe_stride_size,
			 rwq->single_stride_log_num_of_bytes -
			 MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES);
		MLX5_SET(wq, wq, log_wqe_num_of_strides, rwq->log_num_strides -
			 MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES);
	}
	MLX5_SET(wq, wq, log_wq_sz, rwq->log_rq_size);
	MLX5_SET(wq, wq, pd, to_mpd(pd)->pdn);
	MLX5_SET(wq, wq, page_offset, rwq->rq_page_offset);
	MLX5_SET(wq, wq, log_wq_pg_sz, rwq->log_page_size);
	MLX5_SET(wq, wq, wq_signature, rwq->wq_sig);
	MLX5_SET64(wq, wq, dbr_addr, rwq->db.dma);
	has_net_offloads = MLX5_CAP_GEN(dev->mdev, eth_net_offloads);
	if (init_attr->create_flags & IB_WQ_FLAGS_CVLAN_STRIPPING) {
		if (!(has_net_offloads && MLX5_CAP_ETH(dev->mdev, vlan_cap))) {
			mlx5_ib_dbg(dev, "VLAN offloads are not supported\n");
			err = -EOPNOTSUPP;
			goto out;
		}
	} else {
		MLX5_SET(rqc, rqc, vsd, 1);
	}
	if (init_attr->create_flags & IB_WQ_FLAGS_SCATTER_FCS) {
		if (!(has_net_offloads && MLX5_CAP_ETH(dev->mdev, scatter_fcs))) {
			mlx5_ib_dbg(dev, "Scatter FCS is not supported\n");
			err = -EOPNOTSUPP;
			goto out;
		}
		MLX5_SET(rqc, rqc, scatter_fcs, 1);
	}
	if (init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) {
		if (!(dev->ib_dev.attrs.raw_packet_caps &
		      IB_RAW_PACKET_CAP_DELAY_DROP)) {
			mlx5_ib_dbg(dev, "Delay drop is not supported\n");
			err = -EOPNOTSUPP;
			goto out;
		}
		MLX5_SET(rqc, rqc, delay_drop_en, 1);
	}
	rq_pas0 = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
	mlx5_ib_populate_pas(dev, rwq->umem, rwq->page_shift, rq_pas0, 0);
	err = mlx5_core_create_rq_tracked(dev->mdev, in, inlen, &rwq->core_qp);
	if (!err && init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) {
		err = set_delay_drop(dev);
		if (err) {
			mlx5_ib_warn(dev, "Failed to enable delay drop err=%d\n",
				     err);
			mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
		} else {
			rwq->create_flags |= MLX5_IB_WQ_FLAGS_DELAY_DROP;
		}
	}
out:
	kvfree(in);
	return err;
}

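/*
 * Translate the user's WQE count and stride into the log based fields
 * the RQ context expects. For example, rq_wqe_count = 512 with
 * rq_wqe_shift = 6 (64-byte strides) gives buf_size = 512 << 6 = 32768
 * bytes, log_rq_stride = 6 and log_rq_size = 9.
 */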
static int set_user_rq_size(struct mlx5_ib_dev *dev,
			    struct ib_wq_init_attr *wq_init_attr,
			    struct mlx5_ib_create_wq *ucmd,
			    struct mlx5_ib_rwq *rwq)
{
	/* Sanity check RQ size before proceeding */
	if (wq_init_attr->max_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_wq_sz)))
		return -EINVAL;

	if (!ucmd->rq_wqe_count)
		return -EINVAL;

	rwq->wqe_count = ucmd->rq_wqe_count;
	rwq->wqe_shift = ucmd->rq_wqe_shift;
	rwq->buf_size = (rwq->wqe_count << rwq->wqe_shift);
	rwq->log_rq_stride = rwq->wqe_shift;
	rwq->log_rq_size = ilog2(rwq->wqe_count);
	return 0;
}

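/*
 * Parse and validate the user's create-WQ command. The required command
 * size intentionally stops short of the striding-RQ fields so that
 * binaries built against an older ABI, without those fields, keep
 * working.
 */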
static int prepare_user_rq(struct ib_pd *pd,
			   struct ib_wq_init_attr *init_attr,
			   struct ib_udata *udata,
			   struct mlx5_ib_rwq *rwq)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_create_wq ucmd = {};
	int err;
	size_t required_cmd_sz;

	required_cmd_sz = offsetof(typeof(ucmd), single_stride_log_num_of_bytes)
		+ sizeof(ucmd.single_stride_log_num_of_bytes);
	if (udata->inlen < required_cmd_sz) {
		mlx5_ib_dbg(dev, "invalid inlen\n");
		return -EINVAL;
	}

	if (udata->inlen > sizeof(ucmd) &&
	    !ib_is_udata_cleared(udata, sizeof(ucmd),
				 udata->inlen - sizeof(ucmd))) {
		mlx5_ib_dbg(dev, "inlen is not supported\n");
		return -EOPNOTSUPP;
	}

	if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
		mlx5_ib_dbg(dev, "copy failed\n");
		return -EFAULT;
	}

	if (ucmd.comp_mask & (~MLX5_IB_CREATE_WQ_STRIDING_RQ)) {
		mlx5_ib_dbg(dev, "invalid comp mask\n");
		return -EOPNOTSUPP;
	} else if (ucmd.comp_mask & MLX5_IB_CREATE_WQ_STRIDING_RQ) {
		if (!MLX5_CAP_GEN(dev->mdev, striding_rq)) {
			mlx5_ib_dbg(dev, "Striding RQ is not supported\n");
			return -EOPNOTSUPP;
		}
		if ((ucmd.single_stride_log_num_of_bytes <
		     MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES) ||
		    (ucmd.single_stride_log_num_of_bytes >
		     MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES)) {
			mlx5_ib_dbg(dev, "Invalid log stride size (%u. Range is %u - %u)\n",
				    ucmd.single_stride_log_num_of_bytes,
				    MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES,
				    MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES);
			return -EINVAL;
		}
		if ((ucmd.single_wqe_log_num_of_strides >
		     MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES) ||
		    (ucmd.single_wqe_log_num_of_strides <
		     MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES)) {
			mlx5_ib_dbg(dev, "Invalid log num strides (%u. Range is %u - %u)\n",
				    ucmd.single_wqe_log_num_of_strides,
				    MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES,
				    MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES);
			return -EINVAL;
		}
		rwq->single_stride_log_num_of_bytes =
			ucmd.single_stride_log_num_of_bytes;
		rwq->log_num_strides = ucmd.single_wqe_log_num_of_strides;
		rwq->two_byte_shift_en = !!ucmd.two_byte_shift_en;
		rwq->create_flags |= MLX5_IB_WQ_FLAGS_STRIDING_RQ;
	}

	err = set_user_rq_size(dev, init_attr, &ucmd, rwq);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		return err;
	}

	err = create_user_rq(dev, pd, rwq, &ucmd);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		return err;
	}

	rwq->user_index = ucmd.user_index;
	return 0;
}

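/*
 * mlx5_ib_create_wq() backs the ib_create_wq() verb; only IB_WQT_RQ is
 * supported. A minimal caller sketch (illustrative only; assumes pd and
 * cq were set up earlier):
 *
 *	struct ib_wq_init_attr attr = {
 *		.wq_type = IB_WQT_RQ,
 *		.max_wr	 = 512,
 *		.max_sge = 1,
 *		.cq	 = cq,
 *	};
 *	struct ib_wq *wq = ib_create_wq(pd, &attr);
 *
 *	if (IS_ERR(wq))
 *		return PTR_ERR(wq);
 *
 * In practice WQs are created from userspace through uverbs, which is
 * why udata is mandatory here.
 */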
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev;
	struct mlx5_ib_rwq *rwq;
	struct mlx5_ib_create_wq_resp resp = {};
	size_t min_resp_len;
	int err;

	if (!udata)
		return ERR_PTR(-ENOSYS);

	min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
	if (udata->outlen && udata->outlen < min_resp_len)
		return ERR_PTR(-EINVAL);

	dev = to_mdev(pd->device);
	switch (init_attr->wq_type) {
	case IB_WQT_RQ:
		rwq = kzalloc(sizeof(*rwq), GFP_KERNEL);
		if (!rwq)
			return ERR_PTR(-ENOMEM);
		err = prepare_user_rq(pd, init_attr, udata, rwq);
		if (err)
			goto err;
		err = create_rq(rwq, pd, init_attr);
		if (err)
			goto err_user_rq;
		break;
	default:
		mlx5_ib_dbg(dev, "unsupported wq type %d\n",
			    init_attr->wq_type);
		return ERR_PTR(-EINVAL);
	}

	rwq->ibwq.wq_num = rwq->core_qp.qpn;
	rwq->ibwq.state = IB_WQS_RESET;
	if (udata->outlen) {
		resp.response_length = offsetof(typeof(resp), response_length) +
				sizeof(resp.response_length);
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
		if (err)
			goto err_copy;
	}

	rwq->core_qp.event = mlx5_ib_wq_event;
	rwq->ibwq.event_handler = init_attr->event_handler;
	return &rwq->ibwq;

err_copy:
	mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
err_user_rq:
	destroy_user_rq(dev, pd, rwq);
err:
	kfree(rwq);
	return ERR_PTR(err);
}

int mlx5_ib_destroy_wq(struct ib_wq *wq)
{
	struct mlx5_ib_dev *dev = to_mdev(wq->device);
	struct mlx5_ib_rwq *rwq = to_mrwq(wq);

	mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
	destroy_user_rq(dev, wq->pd, rwq);
	kfree(rwq);

	return 0;
}

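/*
 * An RWQ indirection table (an RQT in device terms) spreads incoming
 * traffic across 2^log_ind_tbl_size receive WQs; it is the device-side
 * half of RSS, with a hashing TIR selecting an entry in this table.
 */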
struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
						      struct ib_rwq_ind_table_init_attr *init_attr,
						      struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	struct mlx5_ib_rwq_ind_table *rwq_ind_tbl;
	int sz = 1 << init_attr->log_ind_tbl_size;
	struct mlx5_ib_create_rwq_ind_tbl_resp resp = {};
	size_t min_resp_len;
	int inlen;
	int err;
	int i;
	u32 *in;
	void *rqtc;

	if (udata->inlen > 0 &&
	    !ib_is_udata_cleared(udata, 0,
				 udata->inlen))
		return ERR_PTR(-EOPNOTSUPP);

	if (init_attr->log_ind_tbl_size >
	    MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)) {
		mlx5_ib_dbg(dev, "log_ind_tbl_size = %d is bigger than supported = %d\n",
			    init_attr->log_ind_tbl_size,
			    MLX5_CAP_GEN(dev->mdev, log_max_rqt_size));
		return ERR_PTR(-EINVAL);
	}

	min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
	if (udata->outlen && udata->outlen < min_resp_len)
		return ERR_PTR(-EINVAL);

	rwq_ind_tbl = kzalloc(sizeof(*rwq_ind_tbl), GFP_KERNEL);
	if (!rwq_ind_tbl)
		return ERR_PTR(-ENOMEM);

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err;
	}

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	for (i = 0; i < sz; i++)
		MLX5_SET(rqtc, rqtc, rq_num[i], init_attr->ind_tbl[i]->wq_num);

	err = mlx5_core_create_rqt(dev->mdev, in, inlen, &rwq_ind_tbl->rqtn);
	kvfree(in);

	if (err)
		goto err;

	rwq_ind_tbl->ib_rwq_ind_tbl.ind_tbl_num = rwq_ind_tbl->rqtn;
	if (udata->outlen) {
		resp.response_length = offsetof(typeof(resp), response_length) +
					sizeof(resp.response_length);
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
		if (err)
			goto err_copy;
	}

	return &rwq_ind_tbl->ib_rwq_ind_tbl;

err_copy:
	mlx5_core_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn);
err:
	kfree(rwq_ind_tbl);
	return ERR_PTR(err);
}

int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
	struct mlx5_ib_rwq_ind_table *rwq_ind_tbl = to_mrwq_ind_table(ib_rwq_ind_tbl);
	struct mlx5_ib_dev *dev = to_mdev(ib_rwq_ind_tbl->device);

	mlx5_core_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn);

	kfree(rwq_ind_tbl);
	return 0;
}

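/*
 * mlx5_ib_modify_wq() backs the ib_modify_wq() verb; the modifiable
 * attributes are the WQ state and CVLAN stripping. An illustrative
 * transition of a freshly created WQ to ready:
 *
 *	struct ib_wq_attr attr = { .wq_state = IB_WQS_RDY };
 *	int err = ib_modify_wq(wq, &attr, IB_WQ_STATE);
 *
 * The IB and RQC state encodings happen to match except for the error
 * state, which is remapped below.
 */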
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(wq->device);
	struct mlx5_ib_rwq *rwq = to_mrwq(wq);
	struct mlx5_ib_modify_wq ucmd = {};
	size_t required_cmd_sz;
	int curr_wq_state;
	int wq_state;
	int inlen;
	int err;
	void *rqc;
	void *in;

	required_cmd_sz = offsetof(typeof(ucmd), reserved) + sizeof(ucmd.reserved);
	if (udata->inlen < required_cmd_sz)
		return -EINVAL;

	if (udata->inlen > sizeof(ucmd) &&
	    !ib_is_udata_cleared(udata, sizeof(ucmd),
				 udata->inlen - sizeof(ucmd)))
		return -EOPNOTSUPP;

	if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)))
		return -EFAULT;

	if (ucmd.comp_mask || ucmd.reserved)
		return -EOPNOTSUPP;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	curr_wq_state = (wq_attr_mask & IB_WQ_CUR_STATE) ?
		wq_attr->curr_wq_state : wq->state;
	wq_state = (wq_attr_mask & IB_WQ_STATE) ?
		wq_attr->wq_state : curr_wq_state;
	if (curr_wq_state == IB_WQS_ERR)
		curr_wq_state = MLX5_RQC_STATE_ERR;
	if (wq_state == IB_WQS_ERR)
		wq_state = MLX5_RQC_STATE_ERR;
	MLX5_SET(modify_rq_in, in, rq_state, curr_wq_state);
	MLX5_SET(rqc, rqc, state, wq_state);

	if (wq_attr_mask & IB_WQ_FLAGS) {
		if (wq_attr->flags_mask & IB_WQ_FLAGS_CVLAN_STRIPPING) {
			if (!(MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
			      MLX5_CAP_ETH(dev->mdev, vlan_cap))) {
				mlx5_ib_dbg(dev, "VLAN offloads are not supported\n");
				err = -EOPNOTSUPP;
				goto out;
			}
			MLX5_SET64(modify_rq_in, in, modify_bitmask,
				   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
			MLX5_SET(rqc, rqc, vsd,
				 (wq_attr->flags & IB_WQ_FLAGS_CVLAN_STRIPPING) ? 0 : 1);
		}

		if (wq_attr->flags_mask & IB_WQ_FLAGS_PCI_WRITE_END_PADDING) {
			mlx5_ib_dbg(dev, "Modifying scatter end padding is not supported\n");
			err = -EOPNOTSUPP;
			goto out;
		}
	}

	if (curr_wq_state == IB_WQS_RESET && wq_state == IB_WQS_RDY) {
		if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) {
			MLX5_SET64(modify_rq_in, in, modify_bitmask,
				   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
			MLX5_SET(rqc, rqc, counter_set_id,
				 dev->port->cnts.set_id);
		} else
			pr_info_once("%s: Receive WQ counters are not supported on current FW\n",
				     dev->ib_dev.name);
	}

	err = mlx5_core_modify_rq(dev->mdev, rwq->core_qp.qpn, in, inlen);
	if (!err)
		rwq->ibwq.state = (wq_state == MLX5_RQC_STATE_ERR) ? IB_WQS_ERR : wq_state;