/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_user_verbs.h>
#include "mlx5_ib.h"

/* not supported currently */
static int wq_signature;

enum {
        MLX5_IB_ACK_REQ_FREQ    = 8,
};

enum {
        MLX5_IB_DEFAULT_SCHED_QUEUE     = 0x83,
        MLX5_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f,
        MLX5_IB_LINK_TYPE_IB            = 0,
        MLX5_IB_LINK_TYPE_ETH           = 1
};

enum {
        MLX5_IB_SQ_STRIDE       = 6,
};

static const u32 mlx5_ib_opcode[] = {
        [IB_WR_SEND]                            = MLX5_OPCODE_SEND,
        [IB_WR_LSO]                             = MLX5_OPCODE_LSO,
        [IB_WR_SEND_WITH_IMM]                   = MLX5_OPCODE_SEND_IMM,
        [IB_WR_RDMA_WRITE]                      = MLX5_OPCODE_RDMA_WRITE,
        [IB_WR_RDMA_WRITE_WITH_IMM]             = MLX5_OPCODE_RDMA_WRITE_IMM,
        [IB_WR_RDMA_READ]                       = MLX5_OPCODE_RDMA_READ,
        [IB_WR_ATOMIC_CMP_AND_SWP]              = MLX5_OPCODE_ATOMIC_CS,
        [IB_WR_ATOMIC_FETCH_AND_ADD]            = MLX5_OPCODE_ATOMIC_FA,
        [IB_WR_SEND_WITH_INV]                   = MLX5_OPCODE_SEND_INVAL,
        [IB_WR_LOCAL_INV]                       = MLX5_OPCODE_UMR,
        [IB_WR_REG_MR]                          = MLX5_OPCODE_UMR,
        [IB_WR_MASKED_ATOMIC_CMP_AND_SWP]       = MLX5_OPCODE_ATOMIC_MASKED_CS,
        [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]     = MLX5_OPCODE_ATOMIC_MASKED_FA,
        [MLX5_IB_WR_UMR]                        = MLX5_OPCODE_UMR,
};

struct mlx5_wqe_eth_pad {
        u8 rsvd0[16];
};

enum raw_qp_set_mask_map {
        MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID         = 1UL << 0,
        MLX5_RAW_QP_RATE_LIMIT                  = 1UL << 1,
};

struct mlx5_modify_raw_qp_param {
        u16 operation;

        u32 set_mask; /* raw_qp_set_mask_map */
};

static void get_cqs(enum ib_qp_type qp_type,
                    struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
                    struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq);

static int is_qp0(enum ib_qp_type qp_type)
{
        return qp_type == IB_QPT_SMI;
}

static int is_sqp(enum ib_qp_type qp_type)
{
        return is_qp0(qp_type) || is_qp1(qp_type);
}

static void *get_wqe(struct mlx5_ib_qp *qp, int offset)
{
        return mlx5_buf_offset(&qp->buf, offset);
}

static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n)
{
        return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
}

void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n)
{
        return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));
}

/**
 * mlx5_ib_read_user_wqe() - Copy a user-space WQE to kernel space.
 *
 * @qp: QP to copy from.
 * @send: copy from the send queue when non-zero, use the receive queue
 *        otherwise.
 * @wqe_index: index to start copying from. For send work queues, the
 *             wqe_index is in units of MLX5_SEND_WQE_BB.
 *             For receive work queue, it is the number of work queue
 *             element in the queue.
 * @buffer: destination buffer.
 * @length: maximum number of bytes to copy.
 *
 * Copies at least a single WQE, but may copy more data.
 *
 * Return: the number of bytes copied, or an error code.
 */
int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
                          void *buffer, u32 length,
                          struct mlx5_ib_qp_base *base)
{
        struct ib_device *ibdev = qp->ibqp.device;
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_ib_wq *wq = send ? &qp->sq : &qp->rq;
        size_t offset;
        size_t wq_end;
        struct ib_umem *umem = base->ubuffer.umem;
        u32 first_copy_length;
        int wqe_length;
        int ret;

        if (wq->wqe_cnt == 0) {
                mlx5_ib_dbg(dev, "mlx5_ib_read_user_wqe for a QP with wqe_cnt == 0. qp_type: 0x%x\n",
                            qp->ibqp.qp_type);
                return -EINVAL;
        }

        offset = wq->offset + ((wqe_index % wq->wqe_cnt) << wq->wqe_shift);
        wq_end = wq->offset + (wq->wqe_cnt << wq->wqe_shift);

        if (send && length < sizeof(struct mlx5_wqe_ctrl_seg))
                return -EINVAL;

        if (offset > umem->length ||
            (send && offset + sizeof(struct mlx5_wqe_ctrl_seg) > umem->length))
                return -EINVAL;

        first_copy_length = min_t(u32, offset + length, wq_end) - offset;
        ret = ib_umem_copy_from(buffer, umem, offset, first_copy_length);
        if (ret)
                return ret;

        if (send) {
                struct mlx5_wqe_ctrl_seg *ctrl = buffer;
                int ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;

                wqe_length = ds * MLX5_WQE_DS_UNITS;
        } else {
                wqe_length = 1 << wq->wqe_shift;
        }

        if (wqe_length <= first_copy_length)
                return first_copy_length;

        ret = ib_umem_copy_from(buffer + first_copy_length, umem, wq->offset,
                                wqe_length - first_copy_length);
        if (ret)
                return ret;

        return wqe_length;
}
static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
{
        struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
        struct ib_event event;

        if (type == MLX5_EVENT_TYPE_PATH_MIG) {
                /* This event is only valid for trans_qps */
                to_mibqp(qp)->port = to_mibqp(qp)->trans_qp.alt_port;
        }

        if (ibqp->event_handler) {
                event.device     = ibqp->device;
                event.element.qp = ibqp;
                switch (type) {
                case MLX5_EVENT_TYPE_PATH_MIG:
                        event.event = IB_EVENT_PATH_MIG;
                        break;
                case MLX5_EVENT_TYPE_COMM_EST:
                        event.event = IB_EVENT_COMM_EST;
                        break;
                case MLX5_EVENT_TYPE_SQ_DRAINED:
                        event.event = IB_EVENT_SQ_DRAINED;
                        break;
                case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
                        event.event = IB_EVENT_QP_LAST_WQE_REACHED;
                        break;
                case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
                        event.event = IB_EVENT_QP_FATAL;
                        break;
                case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
                        event.event = IB_EVENT_PATH_MIG_ERR;
                        break;
                case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
                        event.event = IB_EVENT_QP_REQ_ERR;
                        break;
                case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
                        event.event = IB_EVENT_QP_ACCESS_ERR;
                        break;
                default:
                        pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn);
                        return;
                }

                ibqp->event_handler(&event, ibqp->qp_context);
        }
}

static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
                       int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
{
        int wqe_size;
        int wq_size;

        /* Sanity check RQ size before proceeding */
        if (cap->max_recv_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)))
                return -EINVAL;

        if (!has_rq) {
                qp->rq.max_gs = 0;
                qp->rq.wqe_cnt = 0;
                qp->rq.wqe_shift = 0;
                cap->max_recv_wr = 0;
                cap->max_recv_sge = 0;
        } else {
                if (ucmd) {
                        qp->rq.wqe_cnt = ucmd->rq_wqe_count;
                        qp->rq.wqe_shift = ucmd->rq_wqe_shift;
                        qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
                        qp->rq.max_post = qp->rq.wqe_cnt;
                } else {
                        wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0;
                        wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
                        wqe_size = roundup_pow_of_two(wqe_size);
                        wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
                        wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
                        qp->rq.wqe_cnt = wq_size / wqe_size;
                        if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq)) {
                                mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
                                            wqe_size,
                                            MLX5_CAP_GEN(dev->mdev,
                                                         max_wqe_sz_rq));
                                return -EINVAL;
                        }
                        qp->rq.wqe_shift = ilog2(wqe_size);
                        qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
                        qp->rq.max_post = qp->rq.wqe_cnt;
                }
        }

        return 0;
}
static int sq_overhead(struct ib_qp_init_attr *attr)
{
        int size = 0;

        switch (attr->qp_type) {
        case IB_QPT_XRC_INI:
                size += sizeof(struct mlx5_wqe_xrc_seg);
                /* fall through */
        case IB_QPT_RC:
                size += sizeof(struct mlx5_wqe_ctrl_seg) +
                        max(sizeof(struct mlx5_wqe_atomic_seg) +
                            sizeof(struct mlx5_wqe_raddr_seg),
                            sizeof(struct mlx5_wqe_umr_ctrl_seg) +
                            sizeof(struct mlx5_mkey_seg));
                break;

        case IB_QPT_XRC_TGT:
                return 0;

        case IB_QPT_UC:
                size += sizeof(struct mlx5_wqe_ctrl_seg) +
                        max(sizeof(struct mlx5_wqe_raddr_seg),
                            sizeof(struct mlx5_wqe_umr_ctrl_seg) +
                            sizeof(struct mlx5_mkey_seg));
                break;

        case IB_QPT_UD:
                if (attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
                        size += sizeof(struct mlx5_wqe_eth_pad) +
                                sizeof(struct mlx5_wqe_eth_seg);
                /* fall through */
        case IB_QPT_SMI:
        case MLX5_IB_QPT_HW_GSI:
                size += sizeof(struct mlx5_wqe_ctrl_seg) +
                        sizeof(struct mlx5_wqe_datagram_seg);
                break;

        case MLX5_IB_QPT_REG_UMR:
                size += sizeof(struct mlx5_wqe_ctrl_seg) +
                        sizeof(struct mlx5_wqe_umr_ctrl_seg) +
                        sizeof(struct mlx5_mkey_seg);
                break;

        default:
                return -EINVAL;
        }

        return size;
}

static int calc_send_wqe(struct ib_qp_init_attr *attr)
{
        int inl_size = 0;
        int size;

        size = sq_overhead(attr);
        if (size < 0)
                return size;

        if (attr->cap.max_inline_data) {
                inl_size = size + sizeof(struct mlx5_wqe_inline_seg) +
                        attr->cap.max_inline_data;
        }

        size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);
        if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN &&
            ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB) < MLX5_SIG_WQE_SIZE)
                return MLX5_SIG_WQE_SIZE;
        else
                return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
}

static int get_send_sge(struct ib_qp_init_attr *attr, int wqe_size)
{
        int max_sge;

        if (attr->qp_type == IB_QPT_RC)
                max_sge = (min_t(int, wqe_size, 512) -
                           sizeof(struct mlx5_wqe_ctrl_seg) -
                           sizeof(struct mlx5_wqe_raddr_seg)) /
                        sizeof(struct mlx5_wqe_data_seg);
        else if (attr->qp_type == IB_QPT_XRC_INI)
                max_sge = (min_t(int, wqe_size, 512) -
                           sizeof(struct mlx5_wqe_ctrl_seg) -
                           sizeof(struct mlx5_wqe_xrc_seg) -
                           sizeof(struct mlx5_wqe_raddr_seg)) /
                        sizeof(struct mlx5_wqe_data_seg);
        else
                max_sge = (wqe_size - sq_overhead(attr)) /
                        sizeof(struct mlx5_wqe_data_seg);

        return min_t(int, max_sge, wqe_size - sq_overhead(attr) /
                     sizeof(struct mlx5_wqe_data_seg));
}

static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
                        struct mlx5_ib_qp *qp)
{
        int wqe_size;
        int wq_size;

        if (!attr->cap.max_send_wr)
                return 0;

        wqe_size = calc_send_wqe(attr);
        mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size);
        if (wqe_size < 0)
                return wqe_size;

        if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
                mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
                            wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
                return -EINVAL;
        }

        qp->max_inline_data = wqe_size - sq_overhead(attr) -
                              sizeof(struct mlx5_wqe_inline_seg);
        attr->cap.max_inline_data = qp->max_inline_data;

        if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN)
                qp->signature_en = true;

        wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
        qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
        if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
                mlx5_ib_dbg(dev, "send queue size (%d * %d / %d -> %d) exceeds limits(%d)\n",
                            attr->cap.max_send_wr, wqe_size, MLX5_SEND_WQE_BB,
                            qp->sq.wqe_cnt,
                            1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
                return -ENOMEM;
        }
        qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
        qp->sq.max_gs = get_send_sge(attr, wqe_size);
        if (qp->sq.max_gs < attr->cap.max_send_sge)
                return -ENOMEM;

        attr->cap.max_send_sge = qp->sq.max_gs;
        qp->sq.max_post = wq_size / wqe_size;
        attr->cap.max_send_wr = qp->sq.max_post;

        return wq_size;
}
static int set_user_buf_size(struct mlx5_ib_dev *dev,
                             struct mlx5_ib_qp *qp,
                             struct mlx5_ib_create_qp *ucmd,
                             struct mlx5_ib_qp_base *base,
                             struct ib_qp_init_attr *attr)
{
        int desc_sz = 1 << qp->sq.wqe_shift;

        if (desc_sz > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
                mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
                             desc_sz, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
                return -EINVAL;
        }

        if (ucmd->sq_wqe_count && ((1 << ilog2(ucmd->sq_wqe_count)) != ucmd->sq_wqe_count)) {
                mlx5_ib_warn(dev, "sq_wqe_count %d, sq_wqe_count %d\n",
                             ucmd->sq_wqe_count, ucmd->sq_wqe_count);
                return -EINVAL;
        }

        qp->sq.wqe_cnt = ucmd->sq_wqe_count;

        if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
                mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
                             qp->sq.wqe_cnt,
                             1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
                return -EINVAL;
        }

        if (attr->qp_type == IB_QPT_RAW_PACKET) {
                base->ubuffer.buf_size = qp->rq.wqe_cnt << qp->rq.wqe_shift;
                qp->raw_packet_qp.sq.ubuffer.buf_size = qp->sq.wqe_cnt << 6;
        } else {
                base->ubuffer.buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
                                         (qp->sq.wqe_cnt << 6);
        }

        return 0;
}

static int qp_has_rq(struct ib_qp_init_attr *attr)
{
        if (attr->qp_type == IB_QPT_XRC_INI ||
            attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
            attr->qp_type == MLX5_IB_QPT_REG_UMR ||
            !attr->cap.max_recv_wr)
                return 0;

        return 1;
}
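
/*
 * Blue Flame (UUAR) allocation helpers.  Each UAR page carries several
 * Blue Flame registers; the pool is split into a shared "medium latency"
 * class and dedicated "high class" (low latency) registers, with index 0
 * left out of both pools.  The helpers below choose a register index
 * within those classes and track per-index usage counts.
 */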
static int first_med_uuar(void)
{
        return 1;
}

static int next_uuar(int n)
{
        n++;

        while (((n % 4) & 2))
                n++;

        return n;
}

static int num_med_uuar(struct mlx5_uuar_info *uuari)
{
        int n;

        n = uuari->num_uars * MLX5_NON_FP_BF_REGS_PER_PAGE -
                uuari->num_low_latency_uuars - 1;

        return n >= 0 ? n : 0;
}

static int max_uuari(struct mlx5_uuar_info *uuari)
{
        return uuari->num_uars * 4;
}

static int first_hi_uuar(struct mlx5_uuar_info *uuari)
{
        int med;
        int i;
        int t;

        med = num_med_uuar(uuari);
        for (t = 0, i = first_med_uuar();; i = next_uuar(i)) {
                t++;
                if (t == med)
                        return next_uuar(i);
        }

        return 0;
}

static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
{
        int i;

        for (i = first_hi_uuar(uuari); i < max_uuari(uuari); i = next_uuar(i)) {
                if (!test_bit(i, uuari->bitmap)) {
                        set_bit(i, uuari->bitmap);
                        uuari->count[i]++;
                        return i;
                }
        }

        return -ENOMEM;
}

static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari)
{
        int minidx = first_med_uuar();
        int i;

        for (i = first_med_uuar(); i < first_hi_uuar(uuari); i = next_uuar(i)) {
                if (uuari->count[i] < uuari->count[minidx])
                        minidx = i;
        }

        uuari->count[minidx]++;
        return minidx;
}

static int alloc_uuar(struct mlx5_uuar_info *uuari,
                      enum mlx5_ib_latency_class lat)
{
        int uuarn = -EINVAL;

        mutex_lock(&uuari->lock);
        switch (lat) {
        case MLX5_IB_LATENCY_CLASS_LOW:
                uuarn = 0;
                uuari->count[uuarn]++;
                break;

        case MLX5_IB_LATENCY_CLASS_MEDIUM:
                if (uuari->ver < 2)
                        uuarn = -ENOMEM;
                else
                        uuarn = alloc_med_class_uuar(uuari);
                break;

        case MLX5_IB_LATENCY_CLASS_HIGH:
                if (uuari->ver < 2)
                        uuarn = -ENOMEM;
                else
                        uuarn = alloc_high_class_uuar(uuari);
                break;

        case MLX5_IB_LATENCY_CLASS_FAST_PATH:
                uuarn = 2;
                break;
        }
        mutex_unlock(&uuari->lock);

        return uuarn;
}

static void free_med_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
        clear_bit(uuarn, uuari->bitmap);
        --uuari->count[uuarn];
}

static void free_high_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
        clear_bit(uuarn, uuari->bitmap);
        --uuari->count[uuarn];
}

static void free_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
        int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
        int high_uuar = nuuars - uuari->num_low_latency_uuars;

        mutex_lock(&uuari->lock);
        if (uuarn == 0) {
                --uuari->count[uuarn];
                goto out;
        }

        if (uuarn < high_uuar) {
                free_med_class_uuar(uuari, uuarn);
                goto out;
        }

        free_high_class_uuar(uuari, uuarn);

out:
        mutex_unlock(&uuari->lock);
}

static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
{
        switch (state) {
        case IB_QPS_RESET:      return MLX5_QP_STATE_RST;
        case IB_QPS_INIT:       return MLX5_QP_STATE_INIT;
        case IB_QPS_RTR:        return MLX5_QP_STATE_RTR;
        case IB_QPS_RTS:        return MLX5_QP_STATE_RTS;
        case IB_QPS_SQD:        return MLX5_QP_STATE_SQD;
        case IB_QPS_SQE:        return MLX5_QP_STATE_SQER;
        case IB_QPS_ERR:        return MLX5_QP_STATE_ERR;
        default:                return -1;
        }
}

static int to_mlx5_st(enum ib_qp_type type)
{
        switch (type) {
        case IB_QPT_RC:                 return MLX5_QP_ST_RC;
        case IB_QPT_UC:                 return MLX5_QP_ST_UC;
        case IB_QPT_UD:                 return MLX5_QP_ST_UD;
        case MLX5_IB_QPT_REG_UMR:       return MLX5_QP_ST_REG_UMR;
        case IB_QPT_XRC_INI:
        case IB_QPT_XRC_TGT:            return MLX5_QP_ST_XRC;
        case IB_QPT_SMI:                return MLX5_QP_ST_QP0;
        case MLX5_IB_QPT_HW_GSI:        return MLX5_QP_ST_QP1;
        case IB_QPT_RAW_IPV6:           return MLX5_QP_ST_RAW_IPV6;
        case IB_QPT_RAW_PACKET:
        case IB_QPT_RAW_ETHERTYPE:      return MLX5_QP_ST_RAW_ETHERTYPE;
        default:                return -EINVAL;
        }
}

static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq,
                             struct mlx5_ib_cq *recv_cq);
static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq,
                               struct mlx5_ib_cq *recv_cq);

static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn)
{
        return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index;
}
static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev,
                            struct ib_pd *pd,
                            unsigned long addr, size_t size,
                            struct ib_umem **umem,
                            int *npages, int *page_shift, int *ncont,
                            u32 *offset)
{
        int err;

        *umem = ib_umem_get(pd->uobject->context, addr, size, 0, 0);
        if (IS_ERR(*umem)) {
                mlx5_ib_dbg(dev, "umem_get failed\n");
                return PTR_ERR(*umem);
        }

        mlx5_ib_cont_pages(*umem, addr, 0, npages, page_shift, ncont, NULL);

        err = mlx5_ib_get_buf_offset(addr, *page_shift, offset);
        if (err) {
                mlx5_ib_warn(dev, "bad offset\n");
                goto err_umem;
        }

        mlx5_ib_dbg(dev, "addr 0x%lx, size %zu, npages %d, page_shift %d, ncont %d, offset %d\n",
                    addr, size, *npages, *page_shift, *ncont, *offset);

        return 0;

err_umem:
        ib_umem_release(*umem);
        *umem = NULL;

        return err;
}

static void destroy_user_rq(struct ib_pd *pd, struct mlx5_ib_rwq *rwq)
{
        struct mlx5_ib_ucontext *context;

        context = to_mucontext(pd->uobject->context);
        mlx5_ib_db_unmap_user(context, &rwq->db);
        if (rwq->umem)
                ib_umem_release(rwq->umem);
}

static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
                          struct mlx5_ib_rwq *rwq,
                          struct mlx5_ib_create_wq *ucmd)
{
        struct mlx5_ib_ucontext *context;
        int page_shift = 0;
        int npages;
        u32 offset = 0;
        int ncont = 0;
        int err;

        if (!ucmd->buf_addr)
                return -EINVAL;

        context = to_mucontext(pd->uobject->context);
        rwq->umem = ib_umem_get(pd->uobject->context, ucmd->buf_addr,
                                rwq->buf_size, 0, 0);
        if (IS_ERR(rwq->umem)) {
                mlx5_ib_dbg(dev, "umem_get failed\n");
                err = PTR_ERR(rwq->umem);
                return err;
        }

        mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, 0, &npages, &page_shift,
                           &ncont, NULL);
        err = mlx5_ib_get_buf_offset(ucmd->buf_addr, page_shift,
                                     &rwq->rq_page_offset);
        if (err) {
                mlx5_ib_warn(dev, "bad offset\n");
                goto err_umem;
        }

        rwq->rq_num_pas = ncont;
        rwq->page_shift = page_shift;
        rwq->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
        rwq->wq_sig = !!(ucmd->flags & MLX5_WQ_FLAG_SIGNATURE);

        mlx5_ib_dbg(dev, "addr 0x%llx, size %zd, npages %d, page_shift %d, ncont %d, offset %d\n",
                    (unsigned long long)ucmd->buf_addr, rwq->buf_size,
                    npages, page_shift, ncont, offset);

        err = mlx5_ib_db_map_user(context, ucmd->db_addr, &rwq->db);
        if (err) {
                mlx5_ib_dbg(dev, "map failed\n");
                goto err_umem;
        }

        rwq->create_type = MLX5_WQ_USER;
        return 0;

err_umem:
        ib_umem_release(rwq->umem);
        return err;
}
static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
                          struct mlx5_ib_qp *qp, struct ib_udata *udata,
                          struct ib_qp_init_attr *attr,
                          u32 **in,
                          struct mlx5_ib_create_qp_resp *resp, int *inlen,
                          struct mlx5_ib_qp_base *base)
{
        struct mlx5_ib_ucontext *context;
        struct mlx5_ib_create_qp ucmd;
        struct mlx5_ib_ubuffer *ubuffer = &base->ubuffer;
        int page_shift = 0;
        int uar_index;
        int npages;
        u32 offset = 0;
        int uuarn;
        int ncont = 0;
        __be64 *pas;
        void *qpc;
        int err;

        err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
        if (err) {
                mlx5_ib_dbg(dev, "copy failed\n");
                return err;
        }

        context = to_mucontext(pd->uobject->context);
        /*
         * TBD: should come from the verbs when we have the API
         */
        if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
                /* In CROSS_CHANNEL CQ and QP must use the same UAR */
                uuarn = MLX5_CROSS_CHANNEL_UUAR;
        else {
                uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH);
                if (uuarn < 0) {
                        mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n");
                        mlx5_ib_dbg(dev, "reverting to medium latency\n");
                        uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_MEDIUM);
                        if (uuarn < 0) {
                                mlx5_ib_dbg(dev, "failed to allocate medium latency UUAR\n");
                                mlx5_ib_dbg(dev, "reverting to high latency\n");
                                uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW);
                                if (uuarn < 0) {
                                        mlx5_ib_warn(dev, "uuar allocation failed\n");
                                        return uuarn;
                                }
                        }
                }
        }

        uar_index = uuarn_to_uar_index(&context->uuari, uuarn);
        mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index);

        qp->rq.offset = 0;
        qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
        qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;

        err = set_user_buf_size(dev, qp, &ucmd, base, attr);
        if (err)
                goto err_uuar;

        if (ucmd.buf_addr && ubuffer->buf_size) {
                ubuffer->buf_addr = ucmd.buf_addr;
                err = mlx5_ib_umem_get(dev, pd, ubuffer->buf_addr,
                                       ubuffer->buf_size,
                                       &ubuffer->umem, &npages, &page_shift,
                                       &ncont, &offset);
                if (err)
                        goto err_uuar;
        } else {
                ubuffer->umem = NULL;
        }

        *inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
                 MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * ncont;
        *in = mlx5_vzalloc(*inlen);
        if (!*in) {
                err = -ENOMEM;
                goto err_umem;
        }

        pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas);
        if (ubuffer->umem)
                mlx5_ib_populate_pas(dev, ubuffer->umem, page_shift, pas, 0);

        qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);

        MLX5_SET(qpc, qpc, log_page_size, page_shift - MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET(qpc, qpc, page_offset, offset);

        MLX5_SET(qpc, qpc, uar_page, uar_index);
        resp->uuar_index = uuarn;
        qp->uuarn = uuarn;

        err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db);
        if (err) {
                mlx5_ib_dbg(dev, "map failed\n");
                goto err_free;
        }

        err = ib_copy_to_udata(udata, resp, sizeof(*resp));
        if (err) {
                mlx5_ib_dbg(dev, "copy failed\n");
                goto err_unmap;
        }
        qp->create_type = MLX5_QP_USER;

        return 0;

err_unmap:
        mlx5_ib_db_unmap_user(context, &qp->db);

err_free:
        kvfree(*in);

err_umem:
        if (ubuffer->umem)
                ib_umem_release(ubuffer->umem);

err_uuar:
        free_uuar(&context->uuari, uuarn);
        return err;
}

static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp,
                            struct mlx5_ib_qp_base *base)
{
        struct mlx5_ib_ucontext *context;

        context = to_mucontext(pd->uobject->context);
        mlx5_ib_db_unmap_user(context, &qp->db);
        if (base->ubuffer.umem)
                ib_umem_release(base->ubuffer.umem);
        free_uuar(&context->uuari, qp->uuarn);
}
static int create_kernel_qp(struct mlx5_ib_dev *dev,
                            struct ib_qp_init_attr *init_attr,
                            struct mlx5_ib_qp *qp,
                            u32 **in, int *inlen,
                            struct mlx5_ib_qp_base *base)
{
        enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW;
        struct mlx5_uuar_info *uuari;
        int uar_index;
        void *qpc;
        int uuarn;
        int err;

        uuari = &dev->mdev->priv.uuari;
        if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN |
                                        IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
                                        IB_QP_CREATE_IPOIB_UD_LSO |
                                        mlx5_ib_create_qp_sqpn_qp1()))
                return -EINVAL;

        if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
                lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;

        uuarn = alloc_uuar(uuari, lc);
        if (uuarn < 0) {
                mlx5_ib_dbg(dev, "\n");
                return -ENOMEM;
        }

        qp->bf = &uuari->bfs[uuarn];
        uar_index = qp->bf->uar->index;

        err = calc_sq_size(dev, init_attr, qp);
        if (err < 0) {
                mlx5_ib_dbg(dev, "err %d\n", err);
                goto err_uuar;
        }

        qp->rq.offset = 0;
        qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
        base->ubuffer.buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);

        err = mlx5_buf_alloc(dev->mdev, base->ubuffer.buf_size, &qp->buf);
        if (err) {
                mlx5_ib_dbg(dev, "err %d\n", err);
                goto err_uuar;
        }

        qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
        *inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
                 MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * qp->buf.npages;
        *in = mlx5_vzalloc(*inlen);
        if (!*in) {
                err = -ENOMEM;
                goto err_buf;
        }

        qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);
        MLX5_SET(qpc, qpc, uar_page, uar_index);
        MLX5_SET(qpc, qpc, log_page_size, qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

        /* Set "fast registration enabled" for all kernel QPs */
        MLX5_SET(qpc, qpc, fre, 1);
        MLX5_SET(qpc, qpc, rlky, 1);

        if (init_attr->create_flags & mlx5_ib_create_qp_sqpn_qp1()) {
                MLX5_SET(qpc, qpc, deth_sqpn, 1);
                qp->flags |= MLX5_IB_QP_SQPN_QP1;
        }

        mlx5_fill_page_array(&qp->buf,
                             (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas));

        err = mlx5_db_alloc(dev->mdev, &qp->db);
        if (err) {
                mlx5_ib_dbg(dev, "err %d\n", err);
                goto err_free;
        }

        qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid), GFP_KERNEL);
        qp->sq.wr_data = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data), GFP_KERNEL);
        qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(*qp->rq.wrid), GFP_KERNEL);
        qp->sq.w_list = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.w_list), GFP_KERNEL);
        qp->sq.wqe_head = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wqe_head), GFP_KERNEL);

        if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
            !qp->sq.w_list || !qp->sq.wqe_head) {
                err = -ENOMEM;
                goto err_wrid;
        }
        qp->create_type = MLX5_QP_KERNEL;

        return 0;

err_wrid:
        mlx5_db_free(dev->mdev, &qp->db);
        kfree(qp->sq.wqe_head);
        kfree(qp->sq.w_list);
        kfree(qp->sq.wrid);
        kfree(qp->sq.wr_data);
        kfree(qp->rq.wrid);

err_free:
        kvfree(*in);

err_buf:
        mlx5_buf_free(dev->mdev, &qp->buf);

err_uuar:
        free_uuar(&dev->mdev->priv.uuari, uuarn);
        return err;
}

static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
        mlx5_db_free(dev->mdev, &qp->db);
        kfree(qp->sq.wqe_head);
        kfree(qp->sq.w_list);
        kfree(qp->sq.wrid);
        kfree(qp->sq.wr_data);
        kfree(qp->rq.wrid);
        mlx5_buf_free(dev->mdev, &qp->buf);
        free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn);
}
static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
{
        if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
            (attr->qp_type == IB_QPT_XRC_INI))
                return MLX5_SRQ_RQ;
        else if (!qp->has_rq)
                return MLX5_ZERO_LEN_RQ;
        else
                return MLX5_NON_ZERO_RQ;
}

static int is_connected(enum ib_qp_type qp_type)
{
        if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC)
                return 1;

        return 0;
}

static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
                                    struct mlx5_ib_sq *sq, u32 tdn)
{
        u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
        void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

        MLX5_SET(tisc, tisc, transport_domain, tdn);
        return mlx5_core_create_tis(dev->mdev, in, sizeof(in), &sq->tisn);
}

static void destroy_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
                                      struct mlx5_ib_sq *sq)
{
        mlx5_core_destroy_tis(dev->mdev, sq->tisn);
}

static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
                                   struct mlx5_ib_sq *sq, void *qpin,
                                   struct ib_pd *pd)
{
        struct mlx5_ib_ubuffer *ubuffer = &sq->ubuffer;
        __be64 *pas;
        void *in;
        void *sqc;
        void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc);
        void *wq;
        int inlen;
        int err;
        int page_shift = 0;
        int npages;
        int ncont = 0;
        u32 offset = 0;

        err = mlx5_ib_umem_get(dev, pd, ubuffer->buf_addr, ubuffer->buf_size,
                               &sq->ubuffer.umem, &npages, &page_shift,
                               &ncont, &offset);
        if (err)
                return err;

        inlen = MLX5_ST_SZ_BYTES(create_sq_in) + sizeof(u64) * ncont;
        in = mlx5_vzalloc(inlen);
        if (!in) {
                err = -ENOMEM;
                goto err_umem;
        }

        sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
        MLX5_SET(sqc, sqc, flush_in_error_en, 1);
        MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
        MLX5_SET(sqc, sqc, user_index, MLX5_GET(qpc, qpc, user_index));
        MLX5_SET(sqc, sqc, cqn, MLX5_GET(qpc, qpc, cqn_snd));
        MLX5_SET(sqc, sqc, tis_lst_sz, 1);
        MLX5_SET(sqc, sqc, tis_num_0, sq->tisn);

        wq = MLX5_ADDR_OF(sqc, sqc, wq);
        MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
        MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd));
        MLX5_SET(wq, wq, uar_page, MLX5_GET(qpc, qpc, uar_page));
        MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr));
        MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
        MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(qpc, qpc, log_sq_size));
        MLX5_SET(wq, wq, log_wq_pg_sz, page_shift - MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET(wq, wq, page_offset, offset);

        pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
        mlx5_ib_populate_pas(dev, sq->ubuffer.umem, page_shift, pas, 0);

        err = mlx5_core_create_sq_tracked(dev->mdev, in, inlen, &sq->base.mqp);

        kvfree(in);

        if (err)
                goto err_umem;

        return 0;

err_umem:
        ib_umem_release(sq->ubuffer.umem);
        sq->ubuffer.umem = NULL;

        return err;
}

static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
                                     struct mlx5_ib_sq *sq)
{
        mlx5_core_destroy_sq_tracked(dev->mdev, &sq->base.mqp);
        ib_umem_release(sq->ubuffer.umem);
}
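
/*
 * Derive the size (in bytes) of the physical address array needed for the
 * RQ from the fields already encoded in the QP context: the RQ byte size
 * plus the first-page offset, rounded up to whole pages.
 */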
static int get_rq_pas_size(void *qpc)
{
        u32 log_page_size = MLX5_GET(qpc, qpc, log_page_size) + 12;
        u32 log_rq_stride = MLX5_GET(qpc, qpc, log_rq_stride);
        u32 log_rq_size   = MLX5_GET(qpc, qpc, log_rq_size);
        u32 page_offset   = MLX5_GET(qpc, qpc, page_offset);
        u32 po_quanta     = 1 << (log_page_size - 6);
        u32 rq_sz         = 1 << (log_rq_size + 4 + log_rq_stride);
        u32 page_size     = 1 << log_page_size;
        u32 rq_sz_po      = rq_sz + (page_offset * po_quanta);
        u32 rq_num_pas    = (rq_sz_po + page_size - 1) / page_size;

        return rq_num_pas * sizeof(u64);
}

static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
                                   struct mlx5_ib_rq *rq, void *qpin)
{
        struct mlx5_ib_qp *mqp = rq->base.container_mibqp;
        __be64 *pas;
        __be64 *qp_pas;
        void *in;
        void *rqc;
        void *wq;
        void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc);
        int inlen;
        int err;
        u32 rq_pas_size = get_rq_pas_size(qpc);

        inlen = MLX5_ST_SZ_BYTES(create_rq_in) + rq_pas_size;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
        MLX5_SET(rqc, rqc, vsd, 1);
        MLX5_SET(rqc, rqc, mem_rq_type, MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
        MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
        MLX5_SET(rqc, rqc, flush_in_error_en, 1);
        MLX5_SET(rqc, rqc, user_index, MLX5_GET(qpc, qpc, user_index));
        MLX5_SET(rqc, rqc, cqn, MLX5_GET(qpc, qpc, cqn_rcv));

        if (mqp->flags & MLX5_IB_QP_CAP_SCATTER_FCS)
                MLX5_SET(rqc, rqc, scatter_fcs, 1);

        wq = MLX5_ADDR_OF(rqc, rqc, wq);
        MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
        MLX5_SET(wq, wq, end_padding_mode,
                 MLX5_GET(qpc, qpc, end_padding_mode));
        MLX5_SET(wq, wq, page_offset, MLX5_GET(qpc, qpc, page_offset));
        MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd));
        MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr));
        MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(qpc, qpc, log_rq_stride) + 4);
        MLX5_SET(wq, wq, log_wq_pg_sz, MLX5_GET(qpc, qpc, log_page_size));
        MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(qpc, qpc, log_rq_size));

        pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
        qp_pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, qpin, pas);
        memcpy(pas, qp_pas, rq_pas_size);

        err = mlx5_core_create_rq_tracked(dev->mdev, in, inlen, &rq->base.mqp);

        kvfree(in);

        return err;
}

static void destroy_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
                                     struct mlx5_ib_rq *rq)
{
        mlx5_core_destroy_rq_tracked(dev->mdev, &rq->base.mqp);
}

static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
                                    struct mlx5_ib_rq *rq, u32 tdn)
{
        u32 *in;
        void *tirc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(create_tir_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
        MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
        MLX5_SET(tirc, tirc, inline_rqn, rq->base.mqp.qpn);
        MLX5_SET(tirc, tirc, transport_domain, tdn);

        err = mlx5_core_create_tir(dev->mdev, in, inlen, &rq->tirn);

        kvfree(in);

        return err;
}

static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
                                      struct mlx5_ib_rq *rq)
{
        mlx5_core_destroy_tir(dev->mdev, rq->tirn);
}
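
/*
 * A raw packet QP is not a single hardware QP object: its send side is
 * carried by an SQ bound to a TIS, and its receive side by an RQ exposed
 * through a direct TIR, all within the user's transport domain.  The
 * function below stitches those objects together and reports the SQ (or
 * RQ) number as the QP number.
 */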
static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
                                u32 *in,
                                struct ib_pd *pd)
{
        struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
        struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
        struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
        struct ib_uobject *uobj = pd->uobject;
        struct ib_ucontext *ucontext = uobj->context;
        struct mlx5_ib_ucontext *mucontext = to_mucontext(ucontext);
        int err;
        u32 tdn = mucontext->tdn;

        if (qp->sq.wqe_cnt) {
                err = create_raw_packet_qp_tis(dev, sq, tdn);
                if (err)
                        return err;

                err = create_raw_packet_qp_sq(dev, sq, in, pd);
                if (err)
                        goto err_destroy_tis;

                sq->base.container_mibqp = qp;
        }

        if (qp->rq.wqe_cnt) {
                rq->base.container_mibqp = qp;

                err = create_raw_packet_qp_rq(dev, rq, in);
                if (err)
                        goto err_destroy_sq;

                err = create_raw_packet_qp_tir(dev, rq, tdn);
                if (err)
                        goto err_destroy_rq;
        }

        qp->trans_qp.base.mqp.qpn = qp->sq.wqe_cnt ? sq->base.mqp.qpn :
                                                     rq->base.mqp.qpn;

        return 0;

err_destroy_rq:
        destroy_raw_packet_qp_rq(dev, rq);
err_destroy_sq:
        if (!qp->sq.wqe_cnt)
                return err;
        destroy_raw_packet_qp_sq(dev, sq);
err_destroy_tis:
        destroy_raw_packet_qp_tis(dev, sq);

        return err;
}

static void destroy_raw_packet_qp(struct mlx5_ib_dev *dev,
                                  struct mlx5_ib_qp *qp)
{
        struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
        struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
        struct mlx5_ib_rq *rq = &raw_packet_qp->rq;

        if (qp->rq.wqe_cnt) {
                destroy_raw_packet_qp_tir(dev, rq);
                destroy_raw_packet_qp_rq(dev, rq);
        }

        if (qp->sq.wqe_cnt) {
                destroy_raw_packet_qp_sq(dev, sq);
                destroy_raw_packet_qp_tis(dev, sq);
        }
}

static void raw_packet_qp_copy_info(struct mlx5_ib_qp *qp,
                                    struct mlx5_ib_raw_packet_qp *raw_packet_qp)
{
        struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
        struct mlx5_ib_rq *rq = &raw_packet_qp->rq;

        sq->sq = &qp->sq;
        rq->rq = &qp->rq;
        sq->doorbell = &qp->db;
        rq->doorbell = &qp->db;
}

static void destroy_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
        mlx5_core_destroy_tir(dev->mdev, qp->rss_qp.tirn);
}

static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
                                 struct ib_pd *pd,
                                 struct ib_qp_init_attr *init_attr,
                                 struct ib_udata *udata)
{
        struct ib_uobject *uobj = pd->uobject;
        struct ib_ucontext *ucontext = uobj->context;
        struct mlx5_ib_ucontext *mucontext = to_mucontext(ucontext);
        struct mlx5_ib_create_qp_resp resp = {};
        int inlen;
        int err;
        u32 *in;
        void *tirc;
        void *hfso;
        u32 selected_fields = 0;
        size_t min_resp_len;
        u32 tdn = mucontext->tdn;
        struct mlx5_ib_create_qp_rss ucmd = {};
        size_t required_cmd_sz;

        if (init_attr->qp_type != IB_QPT_RAW_PACKET)
                return -EOPNOTSUPP;

        if (init_attr->create_flags || init_attr->send_cq)
                return -EINVAL;

        min_resp_len = offsetof(typeof(resp), uuar_index) + sizeof(resp.uuar_index);
        if (udata->outlen < min_resp_len)
                return -EINVAL;

        required_cmd_sz = offsetof(typeof(ucmd), reserved1) + sizeof(ucmd.reserved1);
        if (udata->inlen < required_cmd_sz) {
                mlx5_ib_dbg(dev, "invalid inlen\n");
                return -EINVAL;
        }

        if (udata->inlen > sizeof(ucmd) &&
            !ib_is_udata_cleared(udata, sizeof(ucmd),
                                 udata->inlen - sizeof(ucmd))) {
                mlx5_ib_dbg(dev, "inlen is not supported\n");
                return -EOPNOTSUPP;
        }

        if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
                mlx5_ib_dbg(dev, "copy failed\n");
                return -EFAULT;
        }

        if (ucmd.comp_mask) {
                mlx5_ib_dbg(dev, "invalid comp mask\n");
                return -EOPNOTSUPP;
        }

        if (memchr_inv(ucmd.reserved, 0, sizeof(ucmd.reserved)) || ucmd.reserved1) {
                mlx5_ib_dbg(dev, "invalid reserved\n");
                return -EOPNOTSUPP;
        }

        err = ib_copy_to_udata(udata, &resp, min_resp_len);
        if (err) {
                mlx5_ib_dbg(dev, "copy failed\n");
                return -EINVAL;
        }

        inlen = MLX5_ST_SZ_BYTES(create_tir_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
        MLX5_SET(tirc, tirc, disp_type,
                 MLX5_TIRC_DISP_TYPE_INDIRECT);
        MLX5_SET(tirc, tirc, indirect_table,
                 init_attr->rwq_ind_tbl->ind_tbl_num);
        MLX5_SET(tirc, tirc, transport_domain, tdn);

        hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
        switch (ucmd.rx_hash_function) {
        case MLX5_RX_HASH_FUNC_TOEPLITZ:
        {
                void *rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
                size_t len = MLX5_FLD_SZ_BYTES(tirc, rx_hash_toeplitz_key);

                if (len != ucmd.rx_key_len) {
                        err = -EINVAL;
                        goto err;
                }

                MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
                MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
                memcpy(rss_key, ucmd.rx_hash_key, len);
                break;
        }
        default:
                err = -EOPNOTSUPP;
                goto err;
        }

        if (!ucmd.rx_hash_fields_mask) {
                /* special case when this TIR serves as steering entry without hashing */
                if (!init_attr->rwq_ind_tbl->log_ind_tbl_size)
                        goto create_tir;
                err = -EINVAL;
                goto err;
        }

        if (((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
             (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4)) &&
             ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
             (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))) {
                err = -EINVAL;
                goto err;
        }

        /* If none of IPV4 & IPV6 SRC/DST was set - this bit field is ignored */
        if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
            (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4))
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV4);
        else if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
                 (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV6);

        if (((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
             (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP)) &&
             ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
             (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))) {
                err = -EINVAL;
                goto err;
        }

        /* If none of TCP & UDP SRC/DST was set - this bit field is ignored */
        if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
            (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP))
                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
                         MLX5_L4_PROT_TYPE_TCP);
        else if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
                 (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
                         MLX5_L4_PROT_TYPE_UDP);

        if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
            (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6))
                selected_fields |= MLX5_HASH_FIELD_SEL_SRC_IP;

        if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4) ||
            (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
                selected_fields |= MLX5_HASH_FIELD_SEL_DST_IP;

        if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
            (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP))
                selected_fields |= MLX5_HASH_FIELD_SEL_L4_SPORT;

        if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP) ||
            (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
                selected_fields |= MLX5_HASH_FIELD_SEL_L4_DPORT;

        MLX5_SET(rx_hash_field_select, hfso, selected_fields, selected_fields);

create_tir:
        err = mlx5_core_create_tir(dev->mdev, in, inlen, &qp->rss_qp.tirn);

        if (err)
                goto err;

        kvfree(in);
        /* qpn is reserved for that QP */
        qp->trans_qp.base.mqp.qpn = 0;
        qp->flags |= MLX5_IB_QP_RSS;
        return 0;

err:
        kvfree(in);
        return err;
}
static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
                            struct ib_qp_init_attr *init_attr,
                            struct ib_udata *udata, struct mlx5_ib_qp *qp)
{
        struct mlx5_ib_resources *devr = &dev->devr;
        int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
        struct mlx5_core_dev *mdev = dev->mdev;
        struct mlx5_ib_create_qp_resp resp;
        struct mlx5_ib_cq *send_cq;
        struct mlx5_ib_cq *recv_cq;
        unsigned long flags;
        u32 uidx = MLX5_IB_DEFAULT_UIDX;
        struct mlx5_ib_create_qp ucmd;
        struct mlx5_ib_qp_base *base;
        void *qpc;
        u32 *in;
        int err;

        base = init_attr->qp_type == IB_QPT_RAW_PACKET ?
               &qp->raw_packet_qp.rq.base :
               &qp->trans_qp.base;

        if (init_attr->qp_type != IB_QPT_RAW_PACKET)
                mlx5_ib_odp_create_qp(qp);

        mutex_init(&qp->mutex);
        spin_lock_init(&qp->sq.lock);
        spin_lock_init(&qp->rq.lock);

        if (init_attr->rwq_ind_tbl) {
                if (!udata)
                        return -ENOSYS;

                err = create_rss_raw_qp_tir(dev, qp, pd, init_attr, udata);
                return err;
        }

        if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
                if (!MLX5_CAP_GEN(mdev, block_lb_mc)) {
                        mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
                        return -EINVAL;
                } else {
                        qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
                }
        }

        if (init_attr->create_flags &
                        (IB_QP_CREATE_CROSS_CHANNEL |
                         IB_QP_CREATE_MANAGED_SEND |
                         IB_QP_CREATE_MANAGED_RECV)) {
                if (!MLX5_CAP_GEN(mdev, cd)) {
                        mlx5_ib_dbg(dev, "cross-channel isn't supported\n");
                        return -EINVAL;
                }
                if (init_attr->create_flags & IB_QP_CREATE_CROSS_CHANNEL)
                        qp->flags |= MLX5_IB_QP_CROSS_CHANNEL;
                if (init_attr->create_flags & IB_QP_CREATE_MANAGED_SEND)
                        qp->flags |= MLX5_IB_QP_MANAGED_SEND;
                if (init_attr->create_flags & IB_QP_CREATE_MANAGED_RECV)
                        qp->flags |= MLX5_IB_QP_MANAGED_RECV;
        }

        if (init_attr->qp_type == IB_QPT_UD &&
            (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO))
                if (!MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
                        mlx5_ib_dbg(dev, "ipoib UD lso qp isn't supported\n");
                        return -EOPNOTSUPP;
                }

        if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS) {
                if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
                        mlx5_ib_dbg(dev, "Scatter FCS is supported only for Raw Packet QPs");
                        return -EOPNOTSUPP;
                }
                if (!MLX5_CAP_GEN(dev->mdev, eth_net_offloads) ||
                    !MLX5_CAP_ETH(dev->mdev, scatter_fcs)) {
                        mlx5_ib_dbg(dev, "Scatter FCS isn't supported\n");
                        return -EOPNOTSUPP;
                }
                qp->flags |= MLX5_IB_QP_CAP_SCATTER_FCS;
        }

        if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
                qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;

        if (pd && pd->uobject) {
                if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
                        mlx5_ib_dbg(dev, "copy failed\n");
                        return -EFAULT;
                }

                err = get_qp_user_index(to_mucontext(pd->uobject->context),
                                        &ucmd, udata->inlen, &uidx);
                if (err)
                        return err;

                qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
                qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
        } else {
                qp->wq_sig = !!wq_signature;
        }

        qp->has_rq = qp_has_rq(init_attr);
        err = set_rq_size(dev, &init_attr->cap, qp->has_rq,
                          qp, (pd && pd->uobject) ? &ucmd : NULL);
        if (err) {
                mlx5_ib_dbg(dev, "err %d\n", err);
                return err;
        }

        if (pd) {
                if (pd->uobject) {
                        __u32 max_wqes =
                                1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
                        mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
                        if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
                            ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
                                mlx5_ib_dbg(dev, "invalid rq params\n");
                                return -EINVAL;
                        }
                        if (ucmd.sq_wqe_count > max_wqes) {
                                mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
                                            ucmd.sq_wqe_count, max_wqes);
                                return -EINVAL;
                        }
                        if (init_attr->create_flags &
                            mlx5_ib_create_qp_sqpn_qp1()) {
                                mlx5_ib_dbg(dev, "user-space is not allowed to create UD QPs spoofing as QP1\n");
                                return -EINVAL;
                        }
                        err = create_user_qp(dev, pd, qp, udata, init_attr, &in,
                                             &resp, &inlen, base);
                        if (err)
                                mlx5_ib_dbg(dev, "err %d\n", err);
                } else {
                        err = create_kernel_qp(dev, init_attr, qp, &in, &inlen,
                                               base);
                        if (err)
                                mlx5_ib_dbg(dev, "err %d\n", err);
                }

                if (err)
                        return err;
        } else {
                in = mlx5_vzalloc(inlen);
                if (!in)
                        return -ENOMEM;

                qp->create_type = MLX5_QP_EMPTY;
        }

        if (is_sqp(init_attr->qp_type))
                qp->port = init_attr->port_num;

        qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);

        MLX5_SET(qpc, qpc, st, to_mlx5_st(init_attr->qp_type));
        MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);

        if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
                MLX5_SET(qpc, qpc, pd, to_mpd(pd ? pd : devr->p0)->pdn);
        else
                MLX5_SET(qpc, qpc, latency_sensitive, 1);

        if (qp->wq_sig)
                MLX5_SET(qpc, qpc, wq_signature, 1);

        if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
                MLX5_SET(qpc, qpc, block_lb_mc, 1);

        if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
                MLX5_SET(qpc, qpc, cd_master, 1);
        if (qp->flags & MLX5_IB_QP_MANAGED_SEND)
                MLX5_SET(qpc, qpc, cd_slave_send, 1);
        if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
                MLX5_SET(qpc, qpc, cd_slave_receive, 1);

        if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
                int rcqe_sz;
                int scqe_sz;

                rcqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->recv_cq);
                scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq);

                if (rcqe_sz == 128)
                        MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
                else
                        MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA32_CQE);

                if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) {
                        if (scqe_sz == 128)
                                MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA64_CQE);
                        else
                                MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA32_CQE);
                }
        }

        if (qp->rq.wqe_cnt) {
                MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
                MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
        }

        MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr));

        if (qp->sq.wqe_cnt)
                MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt));
        else
                MLX5_SET(qpc, qpc, no_sq, 1);

        /* Set default resources */
        switch (init_attr->qp_type) {
        case IB_QPT_XRC_TGT:
                MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
                MLX5_SET(qpc, qpc, cqn_snd, to_mcq(devr->c0)->mcq.cqn);
                MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
                MLX5_SET(qpc, qpc, xrcd, to_mxrcd(init_attr->xrcd)->xrcdn);
                break;
        case IB_QPT_XRC_INI:
                MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
                MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn);
                MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
                break;
        default:
                if (init_attr->srq) {
                        MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x0)->xrcdn);
                        MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(init_attr->srq)->msrq.srqn);
                } else {
                        MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn);
                        MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s1)->msrq.srqn);
                }
        }

        if (init_attr->send_cq)
                MLX5_SET(qpc, qpc, cqn_snd, to_mcq(init_attr->send_cq)->mcq.cqn);

        if (init_attr->recv_cq)
                MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(init_attr->recv_cq)->mcq.cqn);

        MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);

        /* 0xffffff means we ask to work with cqe version 0 */
        if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
                MLX5_SET(qpc, qpc, user_index, uidx);

        /* we use IB_QP_CREATE_IPOIB_UD_LSO to indicates ipoib qp */
        if (init_attr->qp_type == IB_QPT_UD &&
            (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)) {
                MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1);
                qp->flags |= MLX5_IB_QP_LSO;
        }

        if (init_attr->qp_type == IB_QPT_RAW_PACKET) {
                qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr;
                raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
                err = create_raw_packet_qp(dev, qp, in, pd);
        } else {
                err = mlx5_core_create_qp(dev->mdev, &base->mqp, in, inlen);
        }

        if (err) {
                mlx5_ib_dbg(dev, "create qp failed\n");
                goto err_create;
        }

        kvfree(in);

        base->container_mibqp = qp;
        base->mqp.event = mlx5_ib_qp_event;

        get_cqs(init_attr->qp_type, init_attr->send_cq, init_attr->recv_cq,
                &send_cq, &recv_cq);
        spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
        mlx5_ib_lock_cqs(send_cq, recv_cq);
        /* Maintain device to QPs access, needed for further handling via reset
         * flow
         */
        list_add_tail(&qp->qps_list, &dev->qp_list);
        /* Maintain CQ to QPs access, needed for further handling via reset flow
         */
        if (send_cq)
                list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp);
        if (recv_cq)
                list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
        mlx5_ib_unlock_cqs(send_cq, recv_cq);
        spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);

        return 0;

err_create:
        if (qp->create_type == MLX5_QP_USER)
                destroy_qp_user(pd, qp, base);
        else if (qp->create_type == MLX5_QP_KERNEL)
                destroy_qp_kernel(dev, qp);

        kvfree(in);
        return err;
}
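
/*
 * The send and receive CQ locks are always taken in ascending CQN order
 * (with nested annotation on the inner lock) so that concurrent paths
 * locking the same pair of CQs cannot deadlock; a NULL or shared CQ is
 * covered by sparse-only __acquire()/__release() annotations.
 */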
static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
        __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
        if (send_cq) {
                if (recv_cq) {
                        if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
                                spin_lock(&send_cq->lock);
                                spin_lock_nested(&recv_cq->lock,
                                                 SINGLE_DEPTH_NESTING);
                        } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
                                spin_lock(&send_cq->lock);
                                __acquire(&recv_cq->lock);
                        } else {
                                spin_lock(&recv_cq->lock);
                                spin_lock_nested(&send_cq->lock,
                                                 SINGLE_DEPTH_NESTING);
                        }
                } else {
                        spin_lock(&send_cq->lock);
                        __acquire(&recv_cq->lock);
                }
        } else if (recv_cq) {
                spin_lock(&recv_cq->lock);
                __acquire(&send_cq->lock);
        } else {
                __acquire(&send_cq->lock);
                __acquire(&recv_cq->lock);
        }
}

static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
        __releases(&send_cq->lock) __releases(&recv_cq->lock)
{
        if (send_cq) {
                if (recv_cq) {
                        if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
                                spin_unlock(&recv_cq->lock);
                                spin_unlock(&send_cq->lock);
                        } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
                                __release(&recv_cq->lock);
                                spin_unlock(&send_cq->lock);
                        } else {
                                spin_unlock(&send_cq->lock);
                                spin_unlock(&recv_cq->lock);
                        }
                } else {
                        __release(&recv_cq->lock);
                        spin_unlock(&send_cq->lock);
                }
        } else if (recv_cq) {
                __release(&send_cq->lock);
                spin_unlock(&recv_cq->lock);
        } else {
                __release(&recv_cq->lock);
                __release(&send_cq->lock);
        }
}

static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp)
{
        return to_mpd(qp->ibqp.pd);
}

static void get_cqs(enum ib_qp_type qp_type,
                    struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
                    struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
{
        switch (qp_type) {
        case IB_QPT_XRC_TGT:
                *send_cq = NULL;
                *recv_cq = NULL;
                break;
        case MLX5_IB_QPT_REG_UMR:
        case IB_QPT_XRC_INI:
                *send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
                *recv_cq = NULL;
                break;

        case IB_QPT_SMI:
        case MLX5_IB_QPT_HW_GSI:
        case IB_QPT_RC:
        case IB_QPT_UC:
        case IB_QPT_UD:
        case IB_QPT_RAW_IPV6:
        case IB_QPT_RAW_ETHERTYPE:
        case IB_QPT_RAW_PACKET:
                *send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
                *recv_cq = ib_recv_cq ? to_mcq(ib_recv_cq) : NULL;
                break;

        default:
                *send_cq = NULL;
                *recv_cq = NULL;
                break;
        }
}

static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
                                const struct mlx5_modify_raw_qp_param *raw_qp_param,
                                u8 lag_tx_affinity);

static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
        struct mlx5_ib_cq *send_cq, *recv_cq;
        struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
        unsigned long flags;
        int err;

        if (qp->ibqp.rwq_ind_tbl) {
                destroy_rss_raw_qp_tir(dev, qp);
                return;
        }

        base = qp->ibqp.qp_type == IB_QPT_RAW_PACKET ?
               &qp->raw_packet_qp.rq.base :
               &qp->trans_qp.base;

        if (qp->state != IB_QPS_RESET) {
                if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET) {
                        mlx5_ib_qp_disable_pagefaults(qp);
                        err = mlx5_core_qp_modify(dev->mdev,
                                                  MLX5_CMD_OP_2RST_QP, 0,
                                                  NULL, &base->mqp);
                } else {
                        struct mlx5_modify_raw_qp_param raw_qp_param = {
                                .operation = MLX5_CMD_OP_2RST_QP
                        };

                        err = modify_raw_packet_qp(dev, qp, &raw_qp_param, 0);
                }
                if (err)
                        mlx5_ib_warn(dev, "mlx5_ib: modify QP 0x%06x to RESET failed\n",
                                     base->mqp.qpn);
        }

        get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
                &send_cq, &recv_cq);

        spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
        mlx5_ib_lock_cqs(send_cq, recv_cq);
        /* del from lists under both locks above to protect reset flow paths */
        list_del(&qp->qps_list);
        if (send_cq)
                list_del(&qp->cq_send_list);

        if (recv_cq)
                list_del(&qp->cq_recv_list);

        if (qp->create_type == MLX5_QP_KERNEL) {
                __mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
                                   qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
                if (send_cq != recv_cq)
                        __mlx5_ib_cq_clean(send_cq, base->mqp.qpn,
                                           NULL);
        }
        mlx5_ib_unlock_cqs(send_cq, recv_cq);
        spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);

        if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
                destroy_raw_packet_qp(dev, qp);
        } else {
                err = mlx5_core_destroy_qp(dev->mdev, &base->mqp);
                if (err)
                        mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n",
                                     base->mqp.qpn);
        }

        if (qp->create_type == MLX5_QP_KERNEL)
                destroy_qp_kernel(dev, qp);
        else if (qp->create_type == MLX5_QP_USER)
                destroy_qp_user(&get_pd(qp)->ibpd, qp, base);
}

static const char *ib_qp_type_str(enum ib_qp_type type)
{
        switch (type) {
        case IB_QPT_SMI:
                return "IB_QPT_SMI";
        case IB_QPT_GSI:
                return "IB_QPT_GSI";
        case IB_QPT_RC:
                return "IB_QPT_RC";
        case IB_QPT_UC:
                return "IB_QPT_UC";
        case IB_QPT_UD:
                return "IB_QPT_UD";
        case IB_QPT_RAW_IPV6:
                return "IB_QPT_RAW_IPV6";
        case IB_QPT_RAW_ETHERTYPE:
                return "IB_QPT_RAW_ETHERTYPE";
        case IB_QPT_XRC_INI:
                return "IB_QPT_XRC_INI";
        case IB_QPT_XRC_TGT:
                return "IB_QPT_XRC_TGT";
        case IB_QPT_RAW_PACKET:
                return "IB_QPT_RAW_PACKET";
        case MLX5_IB_QPT_REG_UMR:
                return "MLX5_IB_QPT_REG_UMR";
        default:
                return "Invalid QP type";
        }
}
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
                                struct ib_qp_init_attr *init_attr,
                                struct ib_udata *udata)
{
        struct mlx5_ib_dev *dev;
        struct mlx5_ib_qp *qp;
        u16 xrcdn = 0;
        int err;

        if (pd) {
                dev = to_mdev(pd->device);

                if (init_attr->qp_type == IB_QPT_RAW_PACKET) {
                        if (!pd->uobject) {
                                mlx5_ib_dbg(dev, "Raw Packet QP is not supported for kernel consumers\n");
                                return ERR_PTR(-EINVAL);
                        } else if (!to_mucontext(pd->uobject->context)->cqe_version) {
                                mlx5_ib_dbg(dev, "Raw Packet QP is only supported for CQE version > 0\n");
                                return ERR_PTR(-EINVAL);
                        }
                }
        } else {
                /* being cautious here */
                if (init_attr->qp_type != IB_QPT_XRC_TGT &&
                    init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
                        pr_warn("%s: no PD for transport %s\n", __func__,
                                ib_qp_type_str(init_attr->qp_type));
                        return ERR_PTR(-EINVAL);
                }
                dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
        }

        switch (init_attr->qp_type) {
        case IB_QPT_XRC_TGT:
        case IB_QPT_XRC_INI:
                if (!MLX5_CAP_GEN(dev->mdev, xrc)) {
                        mlx5_ib_dbg(dev, "XRC not supported\n");
                        return ERR_PTR(-ENOSYS);
                }
                init_attr->recv_cq = NULL;
                if (init_attr->qp_type == IB_QPT_XRC_TGT) {
                        xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
                        init_attr->send_cq = NULL;
                }

                /* fall through */
        case IB_QPT_RAW_PACKET:
        case IB_QPT_RC:
        case IB_QPT_UC:
        case IB_QPT_UD:
        case IB_QPT_SMI:
        case MLX5_IB_QPT_HW_GSI:
        case MLX5_IB_QPT_REG_UMR:
                qp = kzalloc(sizeof(*qp), GFP_KERNEL);
                if (!qp)
                        return ERR_PTR(-ENOMEM);

                err = create_qp_common(dev, pd, init_attr, udata, qp);
                if (err) {
                        mlx5_ib_dbg(dev, "create_qp_common failed\n");
                        kfree(qp);
                        return ERR_PTR(err);
                }

                if (is_qp0(init_attr->qp_type))
                        qp->ibqp.qp_num = 0;
                else if (is_qp1(init_attr->qp_type))
                        qp->ibqp.qp_num = 1;
                else
                        qp->ibqp.qp_num = qp->trans_qp.base.mqp.qpn;

                mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
                            qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn,
                            init_attr->recv_cq ? to_mcq(init_attr->recv_cq)->mcq.cqn : -1,
                            init_attr->send_cq ? to_mcq(init_attr->send_cq)->mcq.cqn : -1);

                qp->trans_qp.xrcdn = xrcdn;

                break;

        case IB_QPT_GSI:
                return mlx5_ib_gsi_create_qp(pd, init_attr);

        case IB_QPT_RAW_IPV6:
        case IB_QPT_RAW_ETHERTYPE:
        default:
                mlx5_ib_dbg(dev, "unsupported qp type %d\n",
                            init_attr->qp_type);
                /* Don't support raw QPs */
                return ERR_PTR(-EINVAL);
        }

        return &qp->ibqp;
}

int mlx5_ib_destroy_qp(struct ib_qp *qp)
{
        struct mlx5_ib_dev *dev = to_mdev(qp->device);
        struct mlx5_ib_qp *mqp = to_mqp(qp);

        if (unlikely(qp->qp_type == IB_QPT_GSI))
                return mlx5_ib_gsi_destroy_qp(qp);

        destroy_qp_common(dev, mqp);

        kfree(mqp);

        return 0;
}

static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr,
                                   int attr_mask)
{
        u32 hw_access_flags = 0;
        u8 dest_rd_atomic;
        u32 access_flags;

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                dest_rd_atomic = attr->max_dest_rd_atomic;
        else
                dest_rd_atomic = qp->trans_qp.resp_depth;

        if (attr_mask & IB_QP_ACCESS_FLAGS)
                access_flags = attr->qp_access_flags;
        else
                access_flags = qp->trans_qp.atomic_rd_en;

        if (!dest_rd_atomic)
                access_flags &= IB_ACCESS_REMOTE_WRITE;

        if (access_flags & IB_ACCESS_REMOTE_READ)
                hw_access_flags |= MLX5_QP_BIT_RRE;
        if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
                hw_access_flags |= (MLX5_QP_BIT_RAE | MLX5_ATOMIC_MODE_CX);
        if (access_flags & IB_ACCESS_REMOTE_WRITE)
                hw_access_flags |= MLX5_QP_BIT_RWE;

        return cpu_to_be32(hw_access_flags);
}

enum {
        MLX5_PATH_FLAG_FL       = 1 << 0,
        MLX5_PATH_FLAG_FREE_AR  = 1 << 1,
        MLX5_PATH_FLAG_COUNTER  = 1 << 2,
};

static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
{
        if (rate == IB_RATE_PORT_CURRENT) {
                return 0;
        } else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
                return -EINVAL;
        } else {
                while (rate != IB_RATE_2_5_GBPS &&
                       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
                         MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
                        --rate;
        }

        return rate + MLX5_STAT_RATE_OFFSET;
}
static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
                                      struct mlx5_ib_sq *sq, u8 sl)
{
        void *in;
        void *tisc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(modify_tis_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        MLX5_SET(modify_tis_in, in, bitmask.prio, 1);

        tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
        MLX5_SET(tisc, tisc, prio, ((sl & 0x7) << 1));

        err = mlx5_core_modify_tis(dev, sq->tisn, in, inlen);

        kvfree(in);

        return err;
}

static int modify_raw_packet_tx_affinity(struct mlx5_core_dev *dev,
                                         struct mlx5_ib_sq *sq, u8 tx_affinity)
{
        void *in;
        void *tisc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(modify_tis_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        MLX5_SET(modify_tis_in, in, bitmask.lag_tx_port_affinity, 1);

        tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
        MLX5_SET(tisc, tisc, lag_tx_port_affinity, tx_affinity);

        err = mlx5_core_modify_tis(dev, sq->tisn, in, inlen);

        kvfree(in);

        return err;
}

static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
                         const struct ib_ah_attr *ah,
                         struct mlx5_qp_path *path, u8 port, int attr_mask,
                         u32 path_flags, const struct ib_qp_attr *attr,
                         bool alt)
{
        enum rdma_link_layer ll = rdma_port_get_link_layer(&dev->ib_dev, port);
        int err;

        if (attr_mask & IB_QP_PKEY_INDEX)
                path->pkey_index = cpu_to_be16(alt ? attr->alt_pkey_index :
                                                     attr->pkey_index);

        if (ah->ah_flags & IB_AH_GRH) {
                if (ah->grh.sgid_index >=
                    dev->mdev->port_caps[port - 1].gid_table_len) {
                        pr_err("sgid_index (%u) too large. max is %d\n",
                               ah->grh.sgid_index,
                               dev->mdev->port_caps[port - 1].gid_table_len);
                        return -EINVAL;
                }
        }

        if (ll == IB_LINK_LAYER_ETHERNET) {
                if (!(ah->ah_flags & IB_AH_GRH))
                        return -EINVAL;
                memcpy(path->rmac, ah->dmac, sizeof(ah->dmac));
                path->udp_sport = mlx5_get_roce_udp_sport(dev, port,
                                                          ah->grh.sgid_index);
                path->dci_cfi_prio_sl = (ah->sl & 0x7) << 4;
        } else {
                path->fl_free_ar = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
                path->fl_free_ar |=
                        (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x40 : 0;
                path->rlid = cpu_to_be16(ah->dlid);
                path->grh_mlid = ah->src_path_bits & 0x7f;
                if (ah->ah_flags & IB_AH_GRH)
                        path->grh_mlid  |= 1 << 7;
                path->dci_cfi_prio_sl = ah->sl & 0xf;
        }

        if (ah->ah_flags & IB_AH_GRH) {
                path->mgid_index = ah->grh.sgid_index;
                path->hop_limit  = ah->grh.hop_limit;
                path->tclass_flowlabel =
                        cpu_to_be32((ah->grh.traffic_class << 20) |
                                    (ah->grh.flow_label));
                memcpy(path->rgid, ah->grh.dgid.raw, 16);
        }

        err = ib_rate_to_mlx5(dev, ah->static_rate);
        if (err < 0)
                return err;
        path->static_rate = err;
        path->port = port;

        if (attr_mask & IB_QP_TIMEOUT)
                path->ackto_lt = (alt ? attr->alt_timeout : attr->timeout) << 3;

        if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt)
                return modify_raw_packet_eth_prio(dev->mdev,
                                                  &qp->raw_packet_qp.sq,
                                                  ah->sl & 0xf);

        return 0;
}
static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = {
	[MLX5_QP_STATE_INIT] = {
		[MLX5_QP_STATE_INIT] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_PRI_PORT,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_PRI_PORT,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_Q_KEY		|
					  MLX5_QP_OPTPAR_PRI_PORT,
		},
		[MLX5_QP_STATE_RTR] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX	|
					   MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					   MLX5_QP_OPTPAR_RRE		|
					   MLX5_QP_OPTPAR_RAE		|
					   MLX5_QP_OPTPAR_RWE		|
					   MLX5_QP_OPTPAR_PKEY_INDEX,
		},
	},
	[MLX5_QP_STATE_RTR] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_RNR_TIMEOUT,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
		},
	},
	[MLX5_QP_STATE_RTS] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_RNR_TIMEOUT	|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY		|
					  MLX5_QP_OPTPAR_SRQN		|
					  MLX5_QP_OPTPAR_CQN_RCV,
		},
	},
	[MLX5_QP_STATE_SQER] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_UD]	 = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_UC]	 = MLX5_QP_OPTPAR_RWE,
			[MLX5_QP_ST_RC]	 = MLX5_QP_OPTPAR_RNR_TIMEOUT	|
					   MLX5_QP_OPTPAR_RWE		|
					   MLX5_QP_OPTPAR_RAE,
		},
	},
};
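/*
 * opt_mask is indexed as opt_mask[current state][new state][transport], and
 * __mlx5_ib_modify_qp() below ANDs it with the optional-parameter bits
 * derived from the caller's attr_mask, so only parameters that are legal for
 * the specific transition are passed to firmware.
 */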
static int ib_nr_to_mlx5_nr(int ib_mask)
{
	switch (ib_mask) {
	case IB_QP_CUR_STATE:
		return 0;
	case IB_QP_EN_SQD_ASYNC_NOTIFY:
		return 0;
	case IB_QP_ACCESS_FLAGS:
		return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE |
			MLX5_QP_OPTPAR_RAE;
	case IB_QP_PKEY_INDEX:
		return MLX5_QP_OPTPAR_PKEY_INDEX;
	case IB_QP_PORT:
		return MLX5_QP_OPTPAR_PRI_PORT;
	case IB_QP_QKEY:
		return MLX5_QP_OPTPAR_Q_KEY;
	case IB_QP_AV:
		return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH |
			MLX5_QP_OPTPAR_PRI_PORT;
	case IB_QP_PATH_MTU:
		return 0;
	case IB_QP_TIMEOUT:
		return MLX5_QP_OPTPAR_ACK_TIMEOUT;
	case IB_QP_RETRY_CNT:
		return MLX5_QP_OPTPAR_RETRY_COUNT;
	case IB_QP_RNR_RETRY:
		return MLX5_QP_OPTPAR_RNR_RETRY;
	case IB_QP_MAX_QP_RD_ATOMIC:
		return MLX5_QP_OPTPAR_SRA_MAX;
	case IB_QP_ALT_PATH:
		return MLX5_QP_OPTPAR_ALT_ADDR_PATH;
	case IB_QP_MIN_RNR_TIMER:
		return MLX5_QP_OPTPAR_RNR_TIMEOUT;
	case IB_QP_MAX_DEST_RD_ATOMIC:
		return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE |
			MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE;
	case IB_QP_PATH_MIG_STATE:
		return MLX5_QP_OPTPAR_PM_STATE;
	case IB_QP_DEST_QPN:
		return 0;
	}
	return 0;
}
static int ib_mask_to_mlx5_opt(int ib_mask)
{
	int result = 0;
	int i;

	for (i = 0; i < 8 * sizeof(int); i++) {
		if ((1 << i) & ib_mask)
			result |= ib_nr_to_mlx5_nr(1 << i);
	}

	return result;
}
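/*
 * Illustrative example: an ib_mask of IB_QP_PKEY_INDEX | IB_QP_RETRY_CNT is
 * translated bit by bit through ib_nr_to_mlx5_nr(), yielding
 * MLX5_QP_OPTPAR_PKEY_INDEX | MLX5_QP_OPTPAR_RETRY_COUNT.
 */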
static int modify_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
				   struct mlx5_ib_rq *rq, int new_state,
				   const struct mlx5_modify_raw_qp_param *raw_qp_param)
{
	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_rq_in, in, rq_state, rq->state);

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
	MLX5_SET(rqc, rqc, state, new_state);

	if (raw_qp_param->set_mask & MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID) {
		if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) {
			MLX5_SET64(modify_rq_in, in, modify_bitmask,
				   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_MODIFY_RQ_COUNTER_SET_ID);
			MLX5_SET(rqc, rqc, counter_set_id, raw_qp_param->rq_q_ctr_id);
		} else
			pr_info_once("%s: RAW PACKET QP counters are not supported on current FW\n",
				     dev->ib_dev.name);
	}

	err = mlx5_core_modify_rq(dev->mdev, rq->base.mqp.qpn, in, inlen);
	if (err)
		goto out;

	rq->state = new_state;

out:
	kvfree(in);
	return err;
}
static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev,
				   struct mlx5_ib_sq *sq,
				   int new_state,
				   const struct mlx5_modify_raw_qp_param *raw_qp_param)
{
	struct mlx5_ib_qp *ibqp = sq->base.container_mibqp;
	u32 old_rate = ibqp->rate_limit;
	u32 new_rate = old_rate;
	u16 rl_index = 0;
	void *in;
	void *sqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_sq_in, in, sq_state, sq->state);

	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
	MLX5_SET(sqc, sqc, state, new_state);

	if (raw_qp_param->set_mask & MLX5_RAW_QP_RATE_LIMIT) {
		if (new_state != MLX5_SQC_STATE_RDY)
			pr_warn("%s: Rate limit can only be changed when SQ is moving to RDY\n",
				__func__);
		else
			new_rate = raw_qp_param->rate_limit;
	}

	if (old_rate != new_rate) {
		if (new_rate) {
			err = mlx5_rl_add_rate(dev, new_rate, &rl_index);
			if (err) {
				pr_err("Failed configuring rate %u: %d\n",
				       new_rate, err);
				goto out;
			}
		}

		MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
		MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);
	}

	err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in, inlen);
	if (err) {
		/* Remove new rate from table if failed */
		if (new_rate &&
		    old_rate != new_rate)
			mlx5_rl_remove_rate(dev, new_rate);
		goto out;
	}

	/* Only remove the old rate after new rate was set */
	if ((old_rate &&
	    (old_rate != new_rate)) ||
	    (new_state != MLX5_SQC_STATE_RDY))
		mlx5_rl_remove_rate(dev, old_rate);

	ibqp->rate_limit = new_rate;
	sq->state = new_state;

out:
	kvfree(in);
	return err;
}
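/*
 * Rate-limit handling above is ordered so that the rate table stays
 * consistent: a new rate is added before the SQ is modified, removed again
 * if the modify fails, and the old rate is only released after the new one
 * has been applied (or when the SQ leaves the RDY state).
 */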
static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
				const struct mlx5_modify_raw_qp_param *raw_qp_param,
				u8 tx_affinity)
{
	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
	int modify_rq = !!qp->rq.wqe_cnt;
	int modify_sq = !!qp->sq.wqe_cnt;
	int rq_state;
	int sq_state;
	int err;

	switch (raw_qp_param->operation) {
	case MLX5_CMD_OP_RST2INIT_QP:
		rq_state = MLX5_RQC_STATE_RDY;
		sq_state = MLX5_SQC_STATE_RDY;
		break;
	case MLX5_CMD_OP_2ERR_QP:
		rq_state = MLX5_RQC_STATE_ERR;
		sq_state = MLX5_SQC_STATE_ERR;
		break;
	case MLX5_CMD_OP_2RST_QP:
		rq_state = MLX5_RQC_STATE_RST;
		sq_state = MLX5_SQC_STATE_RST;
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
		if (raw_qp_param->set_mask ==
		    MLX5_RAW_QP_RATE_LIMIT) {
			modify_rq = 0;
			sq_state = sq->state;
		} else {
			return raw_qp_param->set_mask ? -EINVAL : 0;
		}
		break;
	case MLX5_CMD_OP_INIT2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
		if (raw_qp_param->set_mask)
			return -EINVAL;
		else
			return 0;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	if (modify_rq) {
		err = modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param);
		if (err)
			return err;
	}

	if (modify_sq) {
		if (tx_affinity) {
			err = modify_raw_packet_tx_affinity(dev->mdev, sq,
							    tx_affinity);
			if (err)
				return err;
		}

		return modify_raw_packet_qp_sq(dev->mdev, sq, sq_state, raw_qp_param);
	}

	return 0;
}
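/*
 * For a raw packet QP the single transport-level command is fanned out to
 * the underlying RQ and SQ objects: the QP command selects matching
 * MLX5_RQC_STATE_* and MLX5_SQC_STATE_* targets, and each sub-object is only
 * modified if it actually has WQEs (modify_rq / modify_sq above).
 */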
2610 static int __mlx5_ib_modify_qp(struct ib_qp
*ibqp
,
2611 const struct ib_qp_attr
*attr
, int attr_mask
,
2612 enum ib_qp_state cur_state
, enum ib_qp_state new_state
)
2614 static const u16 optab
[MLX5_QP_NUM_STATE
][MLX5_QP_NUM_STATE
] = {
2615 [MLX5_QP_STATE_RST
] = {
2616 [MLX5_QP_STATE_RST
] = MLX5_CMD_OP_2RST_QP
,
2617 [MLX5_QP_STATE_ERR
] = MLX5_CMD_OP_2ERR_QP
,
2618 [MLX5_QP_STATE_INIT
] = MLX5_CMD_OP_RST2INIT_QP
,
2620 [MLX5_QP_STATE_INIT
] = {
2621 [MLX5_QP_STATE_RST
] = MLX5_CMD_OP_2RST_QP
,
2622 [MLX5_QP_STATE_ERR
] = MLX5_CMD_OP_2ERR_QP
,
2623 [MLX5_QP_STATE_INIT
] = MLX5_CMD_OP_INIT2INIT_QP
,
2624 [MLX5_QP_STATE_RTR
] = MLX5_CMD_OP_INIT2RTR_QP
,
2626 [MLX5_QP_STATE_RTR
] = {
2627 [MLX5_QP_STATE_RST
] = MLX5_CMD_OP_2RST_QP
,
2628 [MLX5_QP_STATE_ERR
] = MLX5_CMD_OP_2ERR_QP
,
2629 [MLX5_QP_STATE_RTS
] = MLX5_CMD_OP_RTR2RTS_QP
,
2631 [MLX5_QP_STATE_RTS
] = {
2632 [MLX5_QP_STATE_RST
] = MLX5_CMD_OP_2RST_QP
,
2633 [MLX5_QP_STATE_ERR
] = MLX5_CMD_OP_2ERR_QP
,
2634 [MLX5_QP_STATE_RTS
] = MLX5_CMD_OP_RTS2RTS_QP
,
2636 [MLX5_QP_STATE_SQD
] = {
2637 [MLX5_QP_STATE_RST
] = MLX5_CMD_OP_2RST_QP
,
2638 [MLX5_QP_STATE_ERR
] = MLX5_CMD_OP_2ERR_QP
,
2640 [MLX5_QP_STATE_SQER
] = {
2641 [MLX5_QP_STATE_RST
] = MLX5_CMD_OP_2RST_QP
,
2642 [MLX5_QP_STATE_ERR
] = MLX5_CMD_OP_2ERR_QP
,
2643 [MLX5_QP_STATE_RTS
] = MLX5_CMD_OP_SQERR2RTS_QP
,
2645 [MLX5_QP_STATE_ERR
] = {
2646 [MLX5_QP_STATE_RST
] = MLX5_CMD_OP_2RST_QP
,
2647 [MLX5_QP_STATE_ERR
] = MLX5_CMD_OP_2ERR_QP
,
2651 struct mlx5_ib_dev
*dev
= to_mdev(ibqp
->device
);
2652 struct mlx5_ib_qp
*qp
= to_mqp(ibqp
);
2653 struct mlx5_ib_qp_base
*base
= &qp
->trans_qp
.base
;
2654 struct mlx5_ib_cq
*send_cq
, *recv_cq
;
2655 struct mlx5_qp_context
*context
;
2656 struct mlx5_ib_pd
*pd
;
2657 struct mlx5_ib_port
*mibport
= NULL
;
2658 enum mlx5_qp_state mlx5_cur
, mlx5_new
;
2659 enum mlx5_qp_optpar optpar
;
2665 context
= kzalloc(sizeof(*context
), GFP_KERNEL
);
2669 err
= to_mlx5_st(ibqp
->qp_type
);
2671 mlx5_ib_dbg(dev
, "unsupported qp type %d\n", ibqp
->qp_type
);
2675 context
->flags
= cpu_to_be32(err
<< 16);
2677 if (!(attr_mask
& IB_QP_PATH_MIG_STATE
)) {
2678 context
->flags
|= cpu_to_be32(MLX5_QP_PM_MIGRATED
<< 11);
2680 switch (attr
->path_mig_state
) {
2681 case IB_MIG_MIGRATED
:
2682 context
->flags
|= cpu_to_be32(MLX5_QP_PM_MIGRATED
<< 11);
2685 context
->flags
|= cpu_to_be32(MLX5_QP_PM_REARM
<< 11);
2688 context
->flags
|= cpu_to_be32(MLX5_QP_PM_ARMED
<< 11);
2693 if ((cur_state
== IB_QPS_RESET
) && (new_state
== IB_QPS_INIT
)) {
2694 if ((ibqp
->qp_type
== IB_QPT_RC
) ||
2695 (ibqp
->qp_type
== IB_QPT_UD
&&
2696 !(qp
->flags
& MLX5_IB_QP_SQPN_QP1
)) ||
2697 (ibqp
->qp_type
== IB_QPT_UC
) ||
2698 (ibqp
->qp_type
== IB_QPT_RAW_PACKET
) ||
2699 (ibqp
->qp_type
== IB_QPT_XRC_INI
) ||
2700 (ibqp
->qp_type
== IB_QPT_XRC_TGT
)) {
2701 if (mlx5_lag_is_active(dev
->mdev
)) {
2702 tx_affinity
= (unsigned int)atomic_add_return(1,
2703 &dev
->roce
.next_port
) %
2705 context
->flags
|= cpu_to_be32(tx_affinity
<< 24);
2710 if (is_sqp(ibqp
->qp_type
)) {
2711 context
->mtu_msgmax
= (IB_MTU_256
<< 5) | 8;
2712 } else if (ibqp
->qp_type
== IB_QPT_UD
||
2713 ibqp
->qp_type
== MLX5_IB_QPT_REG_UMR
) {
2714 context
->mtu_msgmax
= (IB_MTU_4096
<< 5) | 12;
2715 } else if (attr_mask
& IB_QP_PATH_MTU
) {
2716 if (attr
->path_mtu
< IB_MTU_256
||
2717 attr
->path_mtu
> IB_MTU_4096
) {
2718 mlx5_ib_warn(dev
, "invalid mtu %d\n", attr
->path_mtu
);
2722 context
->mtu_msgmax
= (attr
->path_mtu
<< 5) |
2723 (u8
)MLX5_CAP_GEN(dev
->mdev
, log_max_msg
);
2726 if (attr_mask
& IB_QP_DEST_QPN
)
2727 context
->log_pg_sz_remote_qpn
= cpu_to_be32(attr
->dest_qp_num
);
2729 if (attr_mask
& IB_QP_PKEY_INDEX
)
2730 context
->pri_path
.pkey_index
= cpu_to_be16(attr
->pkey_index
);
2732 /* todo implement counter_index functionality */
2734 if (is_sqp(ibqp
->qp_type
))
2735 context
->pri_path
.port
= qp
->port
;
2737 if (attr_mask
& IB_QP_PORT
)
2738 context
->pri_path
.port
= attr
->port_num
;
2740 if (attr_mask
& IB_QP_AV
) {
2741 err
= mlx5_set_path(dev
, qp
, &attr
->ah_attr
, &context
->pri_path
,
2742 attr_mask
& IB_QP_PORT
? attr
->port_num
: qp
->port
,
2743 attr_mask
, 0, attr
, false);
2748 if (attr_mask
& IB_QP_TIMEOUT
)
2749 context
->pri_path
.ackto_lt
|= attr
->timeout
<< 3;
2751 if (attr_mask
& IB_QP_ALT_PATH
) {
2752 err
= mlx5_set_path(dev
, qp
, &attr
->alt_ah_attr
,
2755 attr_mask
| IB_QP_PKEY_INDEX
| IB_QP_TIMEOUT
,
2762 get_cqs(qp
->ibqp
.qp_type
, qp
->ibqp
.send_cq
, qp
->ibqp
.recv_cq
,
2763 &send_cq
, &recv_cq
);
2765 context
->flags_pd
= cpu_to_be32(pd
? pd
->pdn
: to_mpd(dev
->devr
.p0
)->pdn
);
2766 context
->cqn_send
= send_cq
? cpu_to_be32(send_cq
->mcq
.cqn
) : 0;
2767 context
->cqn_recv
= recv_cq
? cpu_to_be32(recv_cq
->mcq
.cqn
) : 0;
2768 context
->params1
= cpu_to_be32(MLX5_IB_ACK_REQ_FREQ
<< 28);
2770 if (attr_mask
& IB_QP_RNR_RETRY
)
2771 context
->params1
|= cpu_to_be32(attr
->rnr_retry
<< 13);
2773 if (attr_mask
& IB_QP_RETRY_CNT
)
2774 context
->params1
|= cpu_to_be32(attr
->retry_cnt
<< 16);
2776 if (attr_mask
& IB_QP_MAX_QP_RD_ATOMIC
) {
2777 if (attr
->max_rd_atomic
)
2779 cpu_to_be32(fls(attr
->max_rd_atomic
- 1) << 21);
2782 if (attr_mask
& IB_QP_SQ_PSN
)
2783 context
->next_send_psn
= cpu_to_be32(attr
->sq_psn
);
2785 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
) {
2786 if (attr
->max_dest_rd_atomic
)
2788 cpu_to_be32(fls(attr
->max_dest_rd_atomic
- 1) << 21);
2791 if (attr_mask
& (IB_QP_ACCESS_FLAGS
| IB_QP_MAX_DEST_RD_ATOMIC
))
2792 context
->params2
|= to_mlx5_access_flags(qp
, attr
, attr_mask
);
2794 if (attr_mask
& IB_QP_MIN_RNR_TIMER
)
2795 context
->rnr_nextrecvpsn
|= cpu_to_be32(attr
->min_rnr_timer
<< 24);
2797 if (attr_mask
& IB_QP_RQ_PSN
)
2798 context
->rnr_nextrecvpsn
|= cpu_to_be32(attr
->rq_psn
);
2800 if (attr_mask
& IB_QP_QKEY
)
2801 context
->qkey
= cpu_to_be32(attr
->qkey
);
2803 if (qp
->rq
.wqe_cnt
&& cur_state
== IB_QPS_RESET
&& new_state
== IB_QPS_INIT
)
2804 context
->db_rec_addr
= cpu_to_be64(qp
->db
.dma
);
2806 if (cur_state
== IB_QPS_RESET
&& new_state
== IB_QPS_INIT
) {
2807 u8 port_num
= (attr_mask
& IB_QP_PORT
? attr
->port_num
:
2809 mibport
= &dev
->port
[port_num
];
2810 context
->qp_counter_set_usr_page
|=
2811 cpu_to_be32((u32
)(mibport
->q_cnt_id
) << 24);
2814 if (!ibqp
->uobject
&& cur_state
== IB_QPS_RESET
&& new_state
== IB_QPS_INIT
)
2815 context
->sq_crq_size
|= cpu_to_be16(1 << 4);
2817 if (qp
->flags
& MLX5_IB_QP_SQPN_QP1
)
2818 context
->deth_sqpn
= cpu_to_be32(1);
2820 mlx5_cur
= to_mlx5_state(cur_state
);
2821 mlx5_new
= to_mlx5_state(new_state
);
2822 mlx5_st
= to_mlx5_st(ibqp
->qp_type
);
2826 /* If moving to a reset or error state, we must disable page faults on
2827 * this QP and flush all current page faults. Otherwise a stale page
2828 * fault may attempt to work on this QP after it is reset and moved
2829 * again to RTS, and may cause the driver and the device to get out of
2831 if (cur_state
!= IB_QPS_RESET
&& cur_state
!= IB_QPS_ERR
&&
2832 (new_state
== IB_QPS_RESET
|| new_state
== IB_QPS_ERR
) &&
2833 (qp
->ibqp
.qp_type
!= IB_QPT_RAW_PACKET
))
2834 mlx5_ib_qp_disable_pagefaults(qp
);
2836 if (mlx5_cur
>= MLX5_QP_NUM_STATE
|| mlx5_new
>= MLX5_QP_NUM_STATE
||
2837 !optab
[mlx5_cur
][mlx5_new
])
2840 op
= optab
[mlx5_cur
][mlx5_new
];
2841 optpar
= ib_mask_to_mlx5_opt(attr_mask
);
2842 optpar
&= opt_mask
[mlx5_cur
][mlx5_new
][mlx5_st
];
2844 if (qp
->ibqp
.qp_type
== IB_QPT_RAW_PACKET
) {
2845 struct mlx5_modify_raw_qp_param raw_qp_param
= {};
2847 raw_qp_param
.operation
= op
;
2848 if (cur_state
== IB_QPS_RESET
&& new_state
== IB_QPS_INIT
) {
2849 raw_qp_param
.rq_q_ctr_id
= mibport
->q_cnt_id
;
2850 raw_qp_param
.set_mask
|= MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID
;
2853 if (attr_mask
& IB_QP_RATE_LIMIT
) {
2854 raw_qp_param
.rate_limit
= attr
->rate_limit
;
2855 raw_qp_param
.set_mask
|= MLX5_RAW_QP_RATE_LIMIT
;
2858 err
= modify_raw_packet_qp(dev
, qp
, &raw_qp_param
, tx_affinity
);
2860 err
= mlx5_core_qp_modify(dev
->mdev
, op
, optpar
, context
,
2867 if (cur_state
== IB_QPS_RESET
&& new_state
== IB_QPS_INIT
&&
2868 (qp
->ibqp
.qp_type
!= IB_QPT_RAW_PACKET
))
2869 mlx5_ib_qp_enable_pagefaults(qp
);
2871 qp
->state
= new_state
;
2873 if (attr_mask
& IB_QP_ACCESS_FLAGS
)
2874 qp
->trans_qp
.atomic_rd_en
= attr
->qp_access_flags
;
2875 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
)
2876 qp
->trans_qp
.resp_depth
= attr
->max_dest_rd_atomic
;
2877 if (attr_mask
& IB_QP_PORT
)
2878 qp
->port
= attr
->port_num
;
2879 if (attr_mask
& IB_QP_ALT_PATH
)
2880 qp
->trans_qp
.alt_port
= attr
->alt_port_num
;
2883 * If we moved a kernel QP to RESET, clean up all old CQ
2884 * entries and reinitialize the QP.
2886 if (new_state
== IB_QPS_RESET
&& !ibqp
->uobject
) {
2887 mlx5_ib_cq_clean(recv_cq
, base
->mqp
.qpn
,
2888 ibqp
->srq
? to_msrq(ibqp
->srq
) : NULL
);
2889 if (send_cq
!= recv_cq
)
2890 mlx5_ib_cq_clean(send_cq
, base
->mqp
.qpn
, NULL
);
2896 qp
->sq
.cur_post
= 0;
2897 qp
->sq
.last_poll
= 0;
2898 qp
->db
.db
[MLX5_RCV_DBR
] = 0;
2899 qp
->db
.db
[MLX5_SND_DBR
] = 0;
2907 int mlx5_ib_modify_qp(struct ib_qp
*ibqp
, struct ib_qp_attr
*attr
,
2908 int attr_mask
, struct ib_udata
*udata
)
2910 struct mlx5_ib_dev
*dev
= to_mdev(ibqp
->device
);
2911 struct mlx5_ib_qp
*qp
= to_mqp(ibqp
);
2912 enum ib_qp_type qp_type
;
2913 enum ib_qp_state cur_state
, new_state
;
2916 enum rdma_link_layer ll
= IB_LINK_LAYER_UNSPECIFIED
;
2918 if (ibqp
->rwq_ind_tbl
)
2921 if (unlikely(ibqp
->qp_type
== IB_QPT_GSI
))
2922 return mlx5_ib_gsi_modify_qp(ibqp
, attr
, attr_mask
);
2924 qp_type
= (unlikely(ibqp
->qp_type
== MLX5_IB_QPT_HW_GSI
)) ?
2925 IB_QPT_GSI
: ibqp
->qp_type
;
2927 mutex_lock(&qp
->mutex
);
2929 cur_state
= attr_mask
& IB_QP_CUR_STATE
? attr
->cur_qp_state
: qp
->state
;
2930 new_state
= attr_mask
& IB_QP_STATE
? attr
->qp_state
: cur_state
;
2932 if (!(cur_state
== new_state
&& cur_state
== IB_QPS_RESET
)) {
2933 port
= attr_mask
& IB_QP_PORT
? attr
->port_num
: qp
->port
;
2934 ll
= dev
->ib_dev
.get_link_layer(&dev
->ib_dev
, port
);
2937 if (qp_type
!= MLX5_IB_QPT_REG_UMR
&&
2938 !ib_modify_qp_is_ok(cur_state
, new_state
, qp_type
, attr_mask
, ll
)) {
2939 mlx5_ib_dbg(dev
, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
2940 cur_state
, new_state
, ibqp
->qp_type
, attr_mask
);
2944 if ((attr_mask
& IB_QP_PORT
) &&
2945 (attr
->port_num
== 0 ||
2946 attr
->port_num
> MLX5_CAP_GEN(dev
->mdev
, num_ports
))) {
2947 mlx5_ib_dbg(dev
, "invalid port number %d. number of ports is %d\n",
2948 attr
->port_num
, dev
->num_ports
);
2952 if (attr_mask
& IB_QP_PKEY_INDEX
) {
2953 port
= attr_mask
& IB_QP_PORT
? attr
->port_num
: qp
->port
;
2954 if (attr
->pkey_index
>=
2955 dev
->mdev
->port_caps
[port
- 1].pkey_table_len
) {
2956 mlx5_ib_dbg(dev
, "invalid pkey index %d\n",
2962 if (attr_mask
& IB_QP_MAX_QP_RD_ATOMIC
&&
2963 attr
->max_rd_atomic
>
2964 (1 << MLX5_CAP_GEN(dev
->mdev
, log_max_ra_res_qp
))) {
2965 mlx5_ib_dbg(dev
, "invalid max_rd_atomic value %d\n",
2966 attr
->max_rd_atomic
);
2970 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
&&
2971 attr
->max_dest_rd_atomic
>
2972 (1 << MLX5_CAP_GEN(dev
->mdev
, log_max_ra_req_qp
))) {
2973 mlx5_ib_dbg(dev
, "invalid max_dest_rd_atomic value %d\n",
2974 attr
->max_dest_rd_atomic
);
2978 if (cur_state
== new_state
&& cur_state
== IB_QPS_RESET
) {
2983 err
= __mlx5_ib_modify_qp(ibqp
, attr
, attr_mask
, cur_state
, new_state
);
2986 mutex_unlock(&qp
->mutex
);
static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
{
	struct mlx5_ib_cq *cq;
	unsigned cur;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max_post))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max_post;
}
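/*
 * wq->head and wq->tail appear to be treated as free-running counters, so
 * the subtraction above is wrap-safe in unsigned arithmetic; e.g. a wrapped
 * head of 5 and a tail of 0xfffffffe still give cur == 7.  The second check
 * is taken under the CQ lock so it is coherent with completion processing.
 */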
static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->raddr    = cpu_to_be64(remote_addr);
	rseg->rkey     = cpu_to_be32(rkey);
	rseg->reserved = 0;
}
static void *set_eth_seg(struct mlx5_wqe_eth_seg *eseg,
			 struct ib_send_wr *wr, void *qend,
			 struct mlx5_ib_qp *qp, int *size)
{
	void *seg = eseg;

	memset(eseg, 0, sizeof(struct mlx5_wqe_eth_seg));

	if (wr->send_flags & IB_SEND_IP_CSUM)
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM |
				 MLX5_ETH_WQE_L4_CSUM;

	seg += sizeof(struct mlx5_wqe_eth_seg);
	*size += sizeof(struct mlx5_wqe_eth_seg) / 16;

	if (wr->opcode == IB_WR_LSO) {
		struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr);
		int size_of_inl_hdr_start = sizeof(eseg->inline_hdr_start);
		u64 left, leftlen, copysz;
		void *pdata = ud_wr->header;

		left = ud_wr->hlen;
		eseg->mss = cpu_to_be16(ud_wr->mss);
		eseg->inline_hdr_sz = cpu_to_be16(left);

		/*
		 * If there is space until the end of the queue, copy the
		 * whole header in one shot; otherwise copy up to the end of
		 * the queue, roll back, and then copy the rest.
		 */
		leftlen = qend - (void *)eseg->inline_hdr_start;
		copysz = min_t(u64, leftlen, left);

		memcpy(seg - size_of_inl_hdr_start, pdata, copysz);

		if (likely(copysz > size_of_inl_hdr_start)) {
			seg += ALIGN(copysz - size_of_inl_hdr_start, 16);
			*size += ALIGN(copysz - size_of_inl_hdr_start, 16) / 16;
		}

		if (unlikely(copysz < left)) { /* the last wqe in the queue */
			seg = mlx5_get_send_wqe(qp, 0);
			left -= copysz;
			pdata += copysz;
			memcpy(seg, pdata, left);
			seg += ALIGN(left, 16);
			*size += ALIGN(left, 16) / 16;
		}
	}

	return seg;
}
static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
			     struct ib_send_wr *wr)
{
	memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
	dseg->av.dqp_dct = cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
	dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey);
}

static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->byte_count = cpu_to_be32(sg->length);
	dseg->lkey       = cpu_to_be32(sg->lkey);
	dseg->addr       = cpu_to_be64(sg->addr);
}

static __be16 get_klm_octo(int npages)
{
	return cpu_to_be16(ALIGN(npages, 8) / 2);
}
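/*
 * Example of the arithmetic above: npages == 5 is rounded up to
 * ALIGN(5, 8) == 8 descriptors, so get_klm_octo() returns cpu_to_be16(4)
 * octowords.
 */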
3088 static __be64
frwr_mkey_mask(void)
3092 result
= MLX5_MKEY_MASK_LEN
|
3093 MLX5_MKEY_MASK_PAGE_SIZE
|
3094 MLX5_MKEY_MASK_START_ADDR
|
3095 MLX5_MKEY_MASK_EN_RINVAL
|
3096 MLX5_MKEY_MASK_KEY
|
3102 MLX5_MKEY_MASK_SMALL_FENCE
|
3103 MLX5_MKEY_MASK_FREE
;
3105 return cpu_to_be64(result
);
3108 static __be64
sig_mkey_mask(void)
3112 result
= MLX5_MKEY_MASK_LEN
|
3113 MLX5_MKEY_MASK_PAGE_SIZE
|
3114 MLX5_MKEY_MASK_START_ADDR
|
3115 MLX5_MKEY_MASK_EN_SIGERR
|
3116 MLX5_MKEY_MASK_EN_RINVAL
|
3117 MLX5_MKEY_MASK_KEY
|
3122 MLX5_MKEY_MASK_SMALL_FENCE
|
3123 MLX5_MKEY_MASK_FREE
|
3124 MLX5_MKEY_MASK_BSF_EN
;
3126 return cpu_to_be64(result
);
3129 static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg
*umr
,
3130 struct mlx5_ib_mr
*mr
)
3132 int ndescs
= mr
->ndescs
;
3134 memset(umr
, 0, sizeof(*umr
));
3136 if (mr
->access_mode
== MLX5_MKC_ACCESS_MODE_KLMS
)
3137 /* KLMs take twice the size of MTTs */
3140 umr
->flags
= MLX5_UMR_CHECK_NOT_FREE
;
3141 umr
->klm_octowords
= get_klm_octo(ndescs
);
3142 umr
->mkey_mask
= frwr_mkey_mask();
3145 static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg
*umr
)
3147 memset(umr
, 0, sizeof(*umr
));
3148 umr
->mkey_mask
= cpu_to_be64(MLX5_MKEY_MASK_FREE
);
3149 umr
->flags
= MLX5_UMR_INLINE
;
3152 static __be64
get_umr_reg_mr_mask(int atomic
)
3156 result
= MLX5_MKEY_MASK_LEN
|
3157 MLX5_MKEY_MASK_PAGE_SIZE
|
3158 MLX5_MKEY_MASK_START_ADDR
|
3162 MLX5_MKEY_MASK_KEY
|
3165 MLX5_MKEY_MASK_FREE
;
3168 result
|= MLX5_MKEY_MASK_A
;
3170 return cpu_to_be64(result
);
3173 static __be64
get_umr_unreg_mr_mask(void)
3177 result
= MLX5_MKEY_MASK_FREE
;
3179 return cpu_to_be64(result
);
3182 static __be64
get_umr_update_mtt_mask(void)
3186 result
= MLX5_MKEY_MASK_FREE
;
3188 return cpu_to_be64(result
);
3191 static __be64
get_umr_update_translation_mask(void)
3195 result
= MLX5_MKEY_MASK_LEN
|
3196 MLX5_MKEY_MASK_PAGE_SIZE
|
3197 MLX5_MKEY_MASK_START_ADDR
|
3198 MLX5_MKEY_MASK_KEY
|
3199 MLX5_MKEY_MASK_FREE
;
3201 return cpu_to_be64(result
);
3204 static __be64
get_umr_update_access_mask(void)
3208 result
= MLX5_MKEY_MASK_LW
|
3212 MLX5_MKEY_MASK_KEY
|
3213 MLX5_MKEY_MASK_FREE
;
3215 return cpu_to_be64(result
);
3218 static __be64
get_umr_update_pd_mask(void)
3222 result
= MLX5_MKEY_MASK_PD
|
3223 MLX5_MKEY_MASK_KEY
|
3224 MLX5_MKEY_MASK_FREE
;
3226 return cpu_to_be64(result
);
3229 static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg
*umr
,
3230 struct ib_send_wr
*wr
, int atomic
)
3232 struct mlx5_umr_wr
*umrwr
= umr_wr(wr
);
3234 memset(umr
, 0, sizeof(*umr
));
3236 if (wr
->send_flags
& MLX5_IB_SEND_UMR_FAIL_IF_FREE
)
3237 umr
->flags
= MLX5_UMR_CHECK_FREE
; /* fail if free */
3239 umr
->flags
= MLX5_UMR_CHECK_NOT_FREE
; /* fail if not free */
3241 if (!(wr
->send_flags
& MLX5_IB_SEND_UMR_UNREG
)) {
3242 umr
->klm_octowords
= get_klm_octo(umrwr
->npages
);
3243 if (wr
->send_flags
& MLX5_IB_SEND_UMR_UPDATE_MTT
) {
3244 umr
->mkey_mask
= get_umr_update_mtt_mask();
3245 umr
->bsf_octowords
= get_klm_octo(umrwr
->target
.offset
);
3246 umr
->flags
|= MLX5_UMR_TRANSLATION_OFFSET_EN
;
3248 if (wr
->send_flags
& MLX5_IB_SEND_UMR_UPDATE_TRANSLATION
)
3249 umr
->mkey_mask
|= get_umr_update_translation_mask();
3250 if (wr
->send_flags
& MLX5_IB_SEND_UMR_UPDATE_ACCESS
)
3251 umr
->mkey_mask
|= get_umr_update_access_mask();
3252 if (wr
->send_flags
& MLX5_IB_SEND_UMR_UPDATE_PD
)
3253 umr
->mkey_mask
|= get_umr_update_pd_mask();
3254 if (!umr
->mkey_mask
)
3255 umr
->mkey_mask
= get_umr_reg_mr_mask(atomic
);
3257 umr
->mkey_mask
= get_umr_unreg_mr_mask();
3261 umr
->flags
|= MLX5_UMR_INLINE
;
static u8 get_umr_flags(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
		MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN;
}
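/*
 * The translation above simply mirrors the IB access bits into the
 * corresponding MLX5_PERM_* bits and always adds local read permission and
 * the UMR-enable bit used by memory registration work requests.
 */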
3273 static void set_reg_mkey_seg(struct mlx5_mkey_seg
*seg
,
3274 struct mlx5_ib_mr
*mr
,
3275 u32 key
, int access
)
3277 int ndescs
= ALIGN(mr
->ndescs
, 8) >> 1;
3279 memset(seg
, 0, sizeof(*seg
));
3281 if (mr
->access_mode
== MLX5_MKC_ACCESS_MODE_MTT
)
3282 seg
->log2_page_size
= ilog2(mr
->ibmr
.page_size
);
3283 else if (mr
->access_mode
== MLX5_MKC_ACCESS_MODE_KLMS
)
3284 /* KLMs take twice the size of MTTs */
3287 seg
->flags
= get_umr_flags(access
) | mr
->access_mode
;
3288 seg
->qpn_mkey7_0
= cpu_to_be32((key
& 0xff) | 0xffffff00);
3289 seg
->flags_pd
= cpu_to_be32(MLX5_MKEY_REMOTE_INVAL
);
3290 seg
->start_addr
= cpu_to_be64(mr
->ibmr
.iova
);
3291 seg
->len
= cpu_to_be64(mr
->ibmr
.length
);
3292 seg
->xlt_oct_size
= cpu_to_be32(ndescs
);
3295 static void set_linv_mkey_seg(struct mlx5_mkey_seg
*seg
)
3297 memset(seg
, 0, sizeof(*seg
));
3298 seg
->status
= MLX5_MKEY_STATUS_FREE
;
3301 static void set_reg_mkey_segment(struct mlx5_mkey_seg
*seg
, struct ib_send_wr
*wr
)
3303 struct mlx5_umr_wr
*umrwr
= umr_wr(wr
);
3305 memset(seg
, 0, sizeof(*seg
));
3306 if (wr
->send_flags
& MLX5_IB_SEND_UMR_UNREG
) {
3307 seg
->status
= MLX5_MKEY_STATUS_FREE
;
3311 seg
->flags
= convert_access(umrwr
->access_flags
);
3312 if (!(wr
->send_flags
& MLX5_IB_SEND_UMR_UPDATE_MTT
)) {
3314 seg
->flags_pd
= cpu_to_be32(to_mpd(umrwr
->pd
)->pdn
);
3315 seg
->start_addr
= cpu_to_be64(umrwr
->target
.virt_addr
);
3317 seg
->len
= cpu_to_be64(umrwr
->length
);
3318 seg
->log2_page_size
= umrwr
->page_shift
;
3319 seg
->qpn_mkey7_0
= cpu_to_be32(0xffffff00 |
3320 mlx5_mkey_variant(umrwr
->mkey
));
3323 static void set_reg_data_seg(struct mlx5_wqe_data_seg
*dseg
,
3324 struct mlx5_ib_mr
*mr
,
3325 struct mlx5_ib_pd
*pd
)
3327 int bcount
= mr
->desc_size
* mr
->ndescs
;
3329 dseg
->addr
= cpu_to_be64(mr
->desc_map
);
3330 dseg
->byte_count
= cpu_to_be32(ALIGN(bcount
, 64));
3331 dseg
->lkey
= cpu_to_be32(pd
->ibpd
.local_dma_lkey
);
static __be32 send_ieth(struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return wr->ex.imm_data;

	case IB_WR_SEND_WITH_INV:
		return cpu_to_be32(wr->ex.invalidate_rkey);

	default:
		return 0;
	}
}

static u8 calc_sig(void *wqe, int size)
{
	u8 *p = wqe;
	u8 res = 0;
	int i;

	for (i = 0; i < size; i++)
		res ^= p[i];

	return ~res;
}

static u8 wq_sig(void *wqe)
{
	return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
}
3366 static int set_data_inl_seg(struct mlx5_ib_qp
*qp
, struct ib_send_wr
*wr
,
3369 struct mlx5_wqe_inline_seg
*seg
;
3370 void *qend
= qp
->sq
.qend
;
3378 wqe
+= sizeof(*seg
);
3379 for (i
= 0; i
< wr
->num_sge
; i
++) {
3380 addr
= (void *)(unsigned long)(wr
->sg_list
[i
].addr
);
3381 len
= wr
->sg_list
[i
].length
;
3384 if (unlikely(inl
> qp
->max_inline_data
))
3387 if (unlikely(wqe
+ len
> qend
)) {
3389 memcpy(wqe
, addr
, copy
);
3392 wqe
= mlx5_get_send_wqe(qp
, 0);
3394 memcpy(wqe
, addr
, len
);
3398 seg
->byte_count
= cpu_to_be32(inl
| MLX5_INLINE_SEG
);
3400 *sz
= ALIGN(inl
+ sizeof(seg
->byte_count
), 16) / 16;
static u16 prot_field_size(enum ib_signature_type type)
{
	switch (type) {
	case IB_SIG_TYPE_T10_DIF:
		return MLX5_DIF_SIZE;
	default:
		return 0;
	}
}

static u8 bs_selector(int block_size)
{
	switch (block_size) {
	case 512:	    return 0x1;
	case 520:	    return 0x2;
	case 4096:	    return 0x3;
	case 4160:	    return 0x4;
	case 1073741824:    return 0x5;
	default:	    return 0;
	}
}
3427 static void mlx5_fill_inl_bsf(struct ib_sig_domain
*domain
,
3428 struct mlx5_bsf_inl
*inl
)
3430 /* Valid inline section and allow BSF refresh */
3431 inl
->vld_refresh
= cpu_to_be16(MLX5_BSF_INL_VALID
|
3432 MLX5_BSF_REFRESH_DIF
);
3433 inl
->dif_apptag
= cpu_to_be16(domain
->sig
.dif
.app_tag
);
3434 inl
->dif_reftag
= cpu_to_be32(domain
->sig
.dif
.ref_tag
);
3435 /* repeating block */
3436 inl
->rp_inv_seed
= MLX5_BSF_REPEAT_BLOCK
;
3437 inl
->sig_type
= domain
->sig
.dif
.bg_type
== IB_T10DIF_CRC
?
3438 MLX5_DIF_CRC
: MLX5_DIF_IPCS
;
3440 if (domain
->sig
.dif
.ref_remap
)
3441 inl
->dif_inc_ref_guard_check
|= MLX5_BSF_INC_REFTAG
;
3443 if (domain
->sig
.dif
.app_escape
) {
3444 if (domain
->sig
.dif
.ref_escape
)
3445 inl
->dif_inc_ref_guard_check
|= MLX5_BSF_APPREF_ESCAPE
;
3447 inl
->dif_inc_ref_guard_check
|= MLX5_BSF_APPTAG_ESCAPE
;
3450 inl
->dif_app_bitmask_check
=
3451 cpu_to_be16(domain
->sig
.dif
.apptag_check_mask
);
3454 static int mlx5_set_bsf(struct ib_mr
*sig_mr
,
3455 struct ib_sig_attrs
*sig_attrs
,
3456 struct mlx5_bsf
*bsf
, u32 data_size
)
3458 struct mlx5_core_sig_ctx
*msig
= to_mmr(sig_mr
)->sig
;
3459 struct mlx5_bsf_basic
*basic
= &bsf
->basic
;
3460 struct ib_sig_domain
*mem
= &sig_attrs
->mem
;
3461 struct ib_sig_domain
*wire
= &sig_attrs
->wire
;
3463 memset(bsf
, 0, sizeof(*bsf
));
3465 /* Basic + Extended + Inline */
3466 basic
->bsf_size_sbs
= 1 << 7;
3467 /* Input domain check byte mask */
3468 basic
->check_byte_mask
= sig_attrs
->check_mask
;
3469 basic
->raw_data_size
= cpu_to_be32(data_size
);
3472 switch (sig_attrs
->mem
.sig_type
) {
3473 case IB_SIG_TYPE_NONE
:
3475 case IB_SIG_TYPE_T10_DIF
:
3476 basic
->mem
.bs_selector
= bs_selector(mem
->sig
.dif
.pi_interval
);
3477 basic
->m_bfs_psv
= cpu_to_be32(msig
->psv_memory
.psv_idx
);
3478 mlx5_fill_inl_bsf(mem
, &bsf
->m_inl
);
3485 switch (sig_attrs
->wire
.sig_type
) {
3486 case IB_SIG_TYPE_NONE
:
3488 case IB_SIG_TYPE_T10_DIF
:
3489 if (mem
->sig
.dif
.pi_interval
== wire
->sig
.dif
.pi_interval
&&
3490 mem
->sig_type
== wire
->sig_type
) {
3491 /* Same block structure */
3492 basic
->bsf_size_sbs
|= 1 << 4;
3493 if (mem
->sig
.dif
.bg_type
== wire
->sig
.dif
.bg_type
)
3494 basic
->wire
.copy_byte_mask
|= MLX5_CPY_GRD_MASK
;
3495 if (mem
->sig
.dif
.app_tag
== wire
->sig
.dif
.app_tag
)
3496 basic
->wire
.copy_byte_mask
|= MLX5_CPY_APP_MASK
;
3497 if (mem
->sig
.dif
.ref_tag
== wire
->sig
.dif
.ref_tag
)
3498 basic
->wire
.copy_byte_mask
|= MLX5_CPY_REF_MASK
;
3500 basic
->wire
.bs_selector
= bs_selector(wire
->sig
.dif
.pi_interval
);
3502 basic
->w_bfs_psv
= cpu_to_be32(msig
->psv_wire
.psv_idx
);
3503 mlx5_fill_inl_bsf(wire
, &bsf
->w_inl
);
3512 static int set_sig_data_segment(struct ib_sig_handover_wr
*wr
,
3513 struct mlx5_ib_qp
*qp
, void **seg
, int *size
)
3515 struct ib_sig_attrs
*sig_attrs
= wr
->sig_attrs
;
3516 struct ib_mr
*sig_mr
= wr
->sig_mr
;
3517 struct mlx5_bsf
*bsf
;
3518 u32 data_len
= wr
->wr
.sg_list
->length
;
3519 u32 data_key
= wr
->wr
.sg_list
->lkey
;
3520 u64 data_va
= wr
->wr
.sg_list
->addr
;
3525 (data_key
== wr
->prot
->lkey
&&
3526 data_va
== wr
->prot
->addr
&&
3527 data_len
== wr
->prot
->length
)) {
3529 * Source domain doesn't contain signature information
3530 * or data and protection are interleaved in memory.
3531 * So need construct:
3532 * ------------------
3534 * ------------------
3536 * ------------------
3538 struct mlx5_klm
*data_klm
= *seg
;
3540 data_klm
->bcount
= cpu_to_be32(data_len
);
3541 data_klm
->key
= cpu_to_be32(data_key
);
3542 data_klm
->va
= cpu_to_be64(data_va
);
3543 wqe_size
= ALIGN(sizeof(*data_klm
), 64);
3546 * Source domain contains signature information
3547 * So need construct a strided block format:
3548 * ---------------------------
3549 * | stride_block_ctrl |
3550 * ---------------------------
3552 * ---------------------------
3554 * ---------------------------
3556 * ---------------------------
3558 struct mlx5_stride_block_ctrl_seg
*sblock_ctrl
;
3559 struct mlx5_stride_block_entry
*data_sentry
;
3560 struct mlx5_stride_block_entry
*prot_sentry
;
3561 u32 prot_key
= wr
->prot
->lkey
;
3562 u64 prot_va
= wr
->prot
->addr
;
3563 u16 block_size
= sig_attrs
->mem
.sig
.dif
.pi_interval
;
3567 data_sentry
= (void *)sblock_ctrl
+ sizeof(*sblock_ctrl
);
3568 prot_sentry
= (void *)data_sentry
+ sizeof(*data_sentry
);
3570 prot_size
= prot_field_size(sig_attrs
->mem
.sig_type
);
3572 pr_err("Bad block size given: %u\n", block_size
);
3575 sblock_ctrl
->bcount_per_cycle
= cpu_to_be32(block_size
+
3577 sblock_ctrl
->op
= cpu_to_be32(MLX5_STRIDE_BLOCK_OP
);
3578 sblock_ctrl
->repeat_count
= cpu_to_be32(data_len
/ block_size
);
3579 sblock_ctrl
->num_entries
= cpu_to_be16(2);
3581 data_sentry
->bcount
= cpu_to_be16(block_size
);
3582 data_sentry
->key
= cpu_to_be32(data_key
);
3583 data_sentry
->va
= cpu_to_be64(data_va
);
3584 data_sentry
->stride
= cpu_to_be16(block_size
);
3586 prot_sentry
->bcount
= cpu_to_be16(prot_size
);
3587 prot_sentry
->key
= cpu_to_be32(prot_key
);
3588 prot_sentry
->va
= cpu_to_be64(prot_va
);
3589 prot_sentry
->stride
= cpu_to_be16(prot_size
);
3591 wqe_size
= ALIGN(sizeof(*sblock_ctrl
) + sizeof(*data_sentry
) +
3592 sizeof(*prot_sentry
), 64);
3596 *size
+= wqe_size
/ 16;
3597 if (unlikely((*seg
== qp
->sq
.qend
)))
3598 *seg
= mlx5_get_send_wqe(qp
, 0);
3601 ret
= mlx5_set_bsf(sig_mr
, sig_attrs
, bsf
, data_len
);
3605 *seg
+= sizeof(*bsf
);
3606 *size
+= sizeof(*bsf
) / 16;
3607 if (unlikely((*seg
== qp
->sq
.qend
)))
3608 *seg
= mlx5_get_send_wqe(qp
, 0);
3613 static void set_sig_mkey_segment(struct mlx5_mkey_seg
*seg
,
3614 struct ib_sig_handover_wr
*wr
, u32 nelements
,
3615 u32 length
, u32 pdn
)
3617 struct ib_mr
*sig_mr
= wr
->sig_mr
;
3618 u32 sig_key
= sig_mr
->rkey
;
3619 u8 sigerr
= to_mmr(sig_mr
)->sig
->sigerr_count
& 1;
3621 memset(seg
, 0, sizeof(*seg
));
3623 seg
->flags
= get_umr_flags(wr
->access_flags
) |
3624 MLX5_MKC_ACCESS_MODE_KLMS
;
3625 seg
->qpn_mkey7_0
= cpu_to_be32((sig_key
& 0xff) | 0xffffff00);
3626 seg
->flags_pd
= cpu_to_be32(MLX5_MKEY_REMOTE_INVAL
| sigerr
<< 26 |
3627 MLX5_MKEY_BSF_EN
| pdn
);
3628 seg
->len
= cpu_to_be64(length
);
3629 seg
->xlt_oct_size
= cpu_to_be32(be16_to_cpu(get_klm_octo(nelements
)));
3630 seg
->bsfs_octo_size
= cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE
);
3633 static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg
*umr
,
3636 memset(umr
, 0, sizeof(*umr
));
3638 umr
->flags
= MLX5_FLAGS_INLINE
| MLX5_FLAGS_CHECK_FREE
;
3639 umr
->klm_octowords
= get_klm_octo(nelements
);
3640 umr
->bsf_octowords
= cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE
);
3641 umr
->mkey_mask
= sig_mkey_mask();
3645 static int set_sig_umr_wr(struct ib_send_wr
*send_wr
, struct mlx5_ib_qp
*qp
,
3646 void **seg
, int *size
)
3648 struct ib_sig_handover_wr
*wr
= sig_handover_wr(send_wr
);
3649 struct mlx5_ib_mr
*sig_mr
= to_mmr(wr
->sig_mr
);
3650 u32 pdn
= get_pd(qp
)->pdn
;
3652 int region_len
, ret
;
3654 if (unlikely(wr
->wr
.num_sge
!= 1) ||
3655 unlikely(wr
->access_flags
& IB_ACCESS_REMOTE_ATOMIC
) ||
3656 unlikely(!sig_mr
->sig
) || unlikely(!qp
->signature_en
) ||
3657 unlikely(!sig_mr
->sig
->sig_status_checked
))
3660 /* length of the protected region, data + protection */
3661 region_len
= wr
->wr
.sg_list
->length
;
3663 (wr
->prot
->lkey
!= wr
->wr
.sg_list
->lkey
||
3664 wr
->prot
->addr
!= wr
->wr
.sg_list
->addr
||
3665 wr
->prot
->length
!= wr
->wr
.sg_list
->length
))
3666 region_len
+= wr
->prot
->length
;
3669 * KLM octoword size - if protection was provided
3670 * then we use strided block format (3 octowords),
3671 * else we use single KLM (1 octoword)
3673 klm_oct_size
= wr
->prot
? 3 : 1;
3675 set_sig_umr_segment(*seg
, klm_oct_size
);
3676 *seg
+= sizeof(struct mlx5_wqe_umr_ctrl_seg
);
3677 *size
+= sizeof(struct mlx5_wqe_umr_ctrl_seg
) / 16;
3678 if (unlikely((*seg
== qp
->sq
.qend
)))
3679 *seg
= mlx5_get_send_wqe(qp
, 0);
3681 set_sig_mkey_segment(*seg
, wr
, klm_oct_size
, region_len
, pdn
);
3682 *seg
+= sizeof(struct mlx5_mkey_seg
);
3683 *size
+= sizeof(struct mlx5_mkey_seg
) / 16;
3684 if (unlikely((*seg
== qp
->sq
.qend
)))
3685 *seg
= mlx5_get_send_wqe(qp
, 0);
3687 ret
= set_sig_data_segment(wr
, qp
, seg
, size
);
3691 sig_mr
->sig
->sig_status_checked
= false;
3695 static int set_psv_wr(struct ib_sig_domain
*domain
,
3696 u32 psv_idx
, void **seg
, int *size
)
3698 struct mlx5_seg_set_psv
*psv_seg
= *seg
;
3700 memset(psv_seg
, 0, sizeof(*psv_seg
));
3701 psv_seg
->psv_num
= cpu_to_be32(psv_idx
);
3702 switch (domain
->sig_type
) {
3703 case IB_SIG_TYPE_NONE
:
3705 case IB_SIG_TYPE_T10_DIF
:
3706 psv_seg
->transient_sig
= cpu_to_be32(domain
->sig
.dif
.bg
<< 16 |
3707 domain
->sig
.dif
.app_tag
);
3708 psv_seg
->ref_tag
= cpu_to_be32(domain
->sig
.dif
.ref_tag
);
3711 pr_err("Bad signature type given.\n");
3715 *seg
+= sizeof(*psv_seg
);
3716 *size
+= sizeof(*psv_seg
) / 16;
3721 static int set_reg_wr(struct mlx5_ib_qp
*qp
,
3722 struct ib_reg_wr
*wr
,
3723 void **seg
, int *size
)
3725 struct mlx5_ib_mr
*mr
= to_mmr(wr
->mr
);
3726 struct mlx5_ib_pd
*pd
= to_mpd(qp
->ibqp
.pd
);
3728 if (unlikely(wr
->wr
.send_flags
& IB_SEND_INLINE
)) {
3729 mlx5_ib_warn(to_mdev(qp
->ibqp
.device
),
3730 "Invalid IB_SEND_INLINE send flag\n");
3734 set_reg_umr_seg(*seg
, mr
);
3735 *seg
+= sizeof(struct mlx5_wqe_umr_ctrl_seg
);
3736 *size
+= sizeof(struct mlx5_wqe_umr_ctrl_seg
) / 16;
3737 if (unlikely((*seg
== qp
->sq
.qend
)))
3738 *seg
= mlx5_get_send_wqe(qp
, 0);
3740 set_reg_mkey_seg(*seg
, mr
, wr
->key
, wr
->access
);
3741 *seg
+= sizeof(struct mlx5_mkey_seg
);
3742 *size
+= sizeof(struct mlx5_mkey_seg
) / 16;
3743 if (unlikely((*seg
== qp
->sq
.qend
)))
3744 *seg
= mlx5_get_send_wqe(qp
, 0);
3746 set_reg_data_seg(*seg
, mr
, pd
);
3747 *seg
+= sizeof(struct mlx5_wqe_data_seg
);
3748 *size
+= (sizeof(struct mlx5_wqe_data_seg
) / 16);
3753 static void set_linv_wr(struct mlx5_ib_qp
*qp
, void **seg
, int *size
)
3755 set_linv_umr_seg(*seg
);
3756 *seg
+= sizeof(struct mlx5_wqe_umr_ctrl_seg
);
3757 *size
+= sizeof(struct mlx5_wqe_umr_ctrl_seg
) / 16;
3758 if (unlikely((*seg
== qp
->sq
.qend
)))
3759 *seg
= mlx5_get_send_wqe(qp
, 0);
3760 set_linv_mkey_seg(*seg
);
3761 *seg
+= sizeof(struct mlx5_mkey_seg
);
3762 *size
+= sizeof(struct mlx5_mkey_seg
) / 16;
3763 if (unlikely((*seg
== qp
->sq
.qend
)))
3764 *seg
= mlx5_get_send_wqe(qp
, 0);
3767 static void dump_wqe(struct mlx5_ib_qp
*qp
, int idx
, int size_16
)
3773 pr_debug("dump wqe at %p\n", mlx5_get_send_wqe(qp
, tidx
));
3774 for (i
= 0, j
= 0; i
< size_16
* 4; i
+= 4, j
+= 4) {
3775 if ((i
& 0xf) == 0) {
3776 void *buf
= mlx5_get_send_wqe(qp
, tidx
);
3777 tidx
= (tidx
+ 1) & (qp
->sq
.wqe_cnt
- 1);
3781 pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p
[j
]),
3782 be32_to_cpu(p
[j
+ 1]), be32_to_cpu(p
[j
+ 2]),
3783 be32_to_cpu(p
[j
+ 3]));
static void mlx5_bf_copy(u64 __iomem *dst, u64 *src,
			 unsigned bytecnt, struct mlx5_ib_qp *qp)
{
	while (bytecnt > 0) {
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		bytecnt -= 64;
		if (unlikely(src == qp->sq.qend))
			src = mlx5_get_send_wqe(qp, 0);
	}
}
static u8 get_fence(u8 fence, struct ib_send_wr *wr)
{
	if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
		     wr->send_flags & IB_SEND_FENCE))
		return MLX5_FENCE_MODE_STRONG_ORDERING;

	if (unlikely(fence)) {
		if (wr->send_flags & IB_SEND_FENCE)
			return MLX5_FENCE_MODE_SMALL_AND_FENCE;
		else
			return fence;
	} else if (unlikely(wr->send_flags & IB_SEND_FENCE)) {
		return MLX5_FENCE_MODE_FENCE;
	}

	return 0;
}
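/*
 * Fence selection above, in order of precedence: a fenced local invalidate
 * asks for strong ordering; if a fence is already cached for the QP, a
 * fenced WR gets the "small and fence" mode while an unfenced one inherits
 * the cached value; otherwise a fenced WR gets the plain fence mode and
 * everything else gets 0.
 */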
3823 static int begin_wqe(struct mlx5_ib_qp
*qp
, void **seg
,
3824 struct mlx5_wqe_ctrl_seg
**ctrl
,
3825 struct ib_send_wr
*wr
, unsigned *idx
,
3826 int *size
, int nreq
)
3828 if (unlikely(mlx5_wq_overflow(&qp
->sq
, nreq
, qp
->ibqp
.send_cq
)))
3831 *idx
= qp
->sq
.cur_post
& (qp
->sq
.wqe_cnt
- 1);
3832 *seg
= mlx5_get_send_wqe(qp
, *idx
);
3834 *(uint32_t *)(*seg
+ 8) = 0;
3835 (*ctrl
)->imm
= send_ieth(wr
);
3836 (*ctrl
)->fm_ce_se
= qp
->sq_signal_bits
|
3837 (wr
->send_flags
& IB_SEND_SIGNALED
?
3838 MLX5_WQE_CTRL_CQ_UPDATE
: 0) |
3839 (wr
->send_flags
& IB_SEND_SOLICITED
?
3840 MLX5_WQE_CTRL_SOLICITED
: 0);
3842 *seg
+= sizeof(**ctrl
);
3843 *size
= sizeof(**ctrl
) / 16;
3848 static void finish_wqe(struct mlx5_ib_qp
*qp
,
3849 struct mlx5_wqe_ctrl_seg
*ctrl
,
3850 u8 size
, unsigned idx
, u64 wr_id
,
3851 int nreq
, u8 fence
, u8 next_fence
,
3856 ctrl
->opmod_idx_opcode
= cpu_to_be32(((u32
)(qp
->sq
.cur_post
) << 8) |
3857 mlx5_opcode
| ((u32
)opmod
<< 24));
3858 ctrl
->qpn_ds
= cpu_to_be32(size
| (qp
->trans_qp
.base
.mqp
.qpn
<< 8));
3859 ctrl
->fm_ce_se
|= fence
;
3860 qp
->fm_cache
= next_fence
;
3861 if (unlikely(qp
->wq_sig
))
3862 ctrl
->signature
= wq_sig(ctrl
);
3864 qp
->sq
.wrid
[idx
] = wr_id
;
3865 qp
->sq
.w_list
[idx
].opcode
= mlx5_opcode
;
3866 qp
->sq
.wqe_head
[idx
] = qp
->sq
.head
+ nreq
;
3867 qp
->sq
.cur_post
+= DIV_ROUND_UP(size
* 16, MLX5_SEND_WQE_BB
);
3868 qp
->sq
.w_list
[idx
].next
= qp
->sq
.cur_post
;
3872 int mlx5_ib_post_send(struct ib_qp
*ibqp
, struct ib_send_wr
*wr
,
3873 struct ib_send_wr
**bad_wr
)
3875 struct mlx5_wqe_ctrl_seg
*ctrl
= NULL
; /* compiler warning */
3876 struct mlx5_ib_dev
*dev
= to_mdev(ibqp
->device
);
3877 struct mlx5_core_dev
*mdev
= dev
->mdev
;
3878 struct mlx5_ib_qp
*qp
;
3879 struct mlx5_ib_mr
*mr
;
3880 struct mlx5_wqe_data_seg
*dpseg
;
3881 struct mlx5_wqe_xrc_seg
*xrc
;
3883 int uninitialized_var(size
);
3885 unsigned long flags
;
3896 if (unlikely(ibqp
->qp_type
== IB_QPT_GSI
))
3897 return mlx5_ib_gsi_post_send(ibqp
, wr
, bad_wr
);
3903 spin_lock_irqsave(&qp
->sq
.lock
, flags
);
3905 if (mdev
->state
== MLX5_DEVICE_STATE_INTERNAL_ERROR
) {
3912 for (nreq
= 0; wr
; nreq
++, wr
= wr
->next
) {
3913 if (unlikely(wr
->opcode
>= ARRAY_SIZE(mlx5_ib_opcode
))) {
3914 mlx5_ib_warn(dev
, "\n");
3920 fence
= qp
->fm_cache
;
3921 num_sge
= wr
->num_sge
;
3922 if (unlikely(num_sge
> qp
->sq
.max_gs
)) {
3923 mlx5_ib_warn(dev
, "\n");
3929 err
= begin_wqe(qp
, &seg
, &ctrl
, wr
, &idx
, &size
, nreq
);
3931 mlx5_ib_warn(dev
, "\n");
3937 switch (ibqp
->qp_type
) {
3938 case IB_QPT_XRC_INI
:
3940 seg
+= sizeof(*xrc
);
3941 size
+= sizeof(*xrc
) / 16;
3944 switch (wr
->opcode
) {
3945 case IB_WR_RDMA_READ
:
3946 case IB_WR_RDMA_WRITE
:
3947 case IB_WR_RDMA_WRITE_WITH_IMM
:
3948 set_raddr_seg(seg
, rdma_wr(wr
)->remote_addr
,
3950 seg
+= sizeof(struct mlx5_wqe_raddr_seg
);
3951 size
+= sizeof(struct mlx5_wqe_raddr_seg
) / 16;
3954 case IB_WR_ATOMIC_CMP_AND_SWP
:
3955 case IB_WR_ATOMIC_FETCH_AND_ADD
:
3956 case IB_WR_MASKED_ATOMIC_CMP_AND_SWP
:
3957 mlx5_ib_warn(dev
, "Atomic operations are not supported yet\n");
3962 case IB_WR_LOCAL_INV
:
3963 next_fence
= MLX5_FENCE_MODE_INITIATOR_SMALL
;
3964 qp
->sq
.wr_data
[idx
] = IB_WR_LOCAL_INV
;
3965 ctrl
->imm
= cpu_to_be32(wr
->ex
.invalidate_rkey
);
3966 set_linv_wr(qp
, &seg
, &size
);
3971 next_fence
= MLX5_FENCE_MODE_INITIATOR_SMALL
;
3972 qp
->sq
.wr_data
[idx
] = IB_WR_REG_MR
;
3973 ctrl
->imm
= cpu_to_be32(reg_wr(wr
)->key
);
3974 err
= set_reg_wr(qp
, reg_wr(wr
), &seg
, &size
);
3982 case IB_WR_REG_SIG_MR
:
3983 qp
->sq
.wr_data
[idx
] = IB_WR_REG_SIG_MR
;
3984 mr
= to_mmr(sig_handover_wr(wr
)->sig_mr
);
3986 ctrl
->imm
= cpu_to_be32(mr
->ibmr
.rkey
);
3987 err
= set_sig_umr_wr(wr
, qp
, &seg
, &size
);
3989 mlx5_ib_warn(dev
, "\n");
3994 finish_wqe(qp
, ctrl
, size
, idx
, wr
->wr_id
,
3995 nreq
, get_fence(fence
, wr
),
3996 next_fence
, MLX5_OPCODE_UMR
);
3998 * SET_PSV WQEs are not signaled and solicited
4001 wr
->send_flags
&= ~IB_SEND_SIGNALED
;
4002 wr
->send_flags
|= IB_SEND_SOLICITED
;
4003 err
= begin_wqe(qp
, &seg
, &ctrl
, wr
,
4006 mlx5_ib_warn(dev
, "\n");
4012 err
= set_psv_wr(&sig_handover_wr(wr
)->sig_attrs
->mem
,
4013 mr
->sig
->psv_memory
.psv_idx
, &seg
,
4016 mlx5_ib_warn(dev
, "\n");
4021 finish_wqe(qp
, ctrl
, size
, idx
, wr
->wr_id
,
4022 nreq
, get_fence(fence
, wr
),
4023 next_fence
, MLX5_OPCODE_SET_PSV
);
4024 err
= begin_wqe(qp
, &seg
, &ctrl
, wr
,
4027 mlx5_ib_warn(dev
, "\n");
4033 next_fence
= MLX5_FENCE_MODE_INITIATOR_SMALL
;
4034 err
= set_psv_wr(&sig_handover_wr(wr
)->sig_attrs
->wire
,
4035 mr
->sig
->psv_wire
.psv_idx
, &seg
,
4038 mlx5_ib_warn(dev
, "\n");
4043 finish_wqe(qp
, ctrl
, size
, idx
, wr
->wr_id
,
4044 nreq
, get_fence(fence
, wr
),
4045 next_fence
, MLX5_OPCODE_SET_PSV
);
4055 switch (wr
->opcode
) {
4056 case IB_WR_RDMA_WRITE
:
4057 case IB_WR_RDMA_WRITE_WITH_IMM
:
4058 set_raddr_seg(seg
, rdma_wr(wr
)->remote_addr
,
4060 seg
+= sizeof(struct mlx5_wqe_raddr_seg
);
4061 size
+= sizeof(struct mlx5_wqe_raddr_seg
) / 16;
4070 case MLX5_IB_QPT_HW_GSI
:
4071 set_datagram_seg(seg
, wr
);
4072 seg
+= sizeof(struct mlx5_wqe_datagram_seg
);
4073 size
+= sizeof(struct mlx5_wqe_datagram_seg
) / 16;
4074 if (unlikely((seg
== qend
)))
4075 seg
= mlx5_get_send_wqe(qp
, 0);
4078 set_datagram_seg(seg
, wr
);
4079 seg
+= sizeof(struct mlx5_wqe_datagram_seg
);
4080 size
+= sizeof(struct mlx5_wqe_datagram_seg
) / 16;
4082 if (unlikely((seg
== qend
)))
4083 seg
= mlx5_get_send_wqe(qp
, 0);
4085 /* handle qp that supports ud offload */
4086 if (qp
->flags
& IB_QP_CREATE_IPOIB_UD_LSO
) {
4087 struct mlx5_wqe_eth_pad
*pad
;
4090 memset(pad
, 0, sizeof(struct mlx5_wqe_eth_pad
));
4091 seg
+= sizeof(struct mlx5_wqe_eth_pad
);
4092 size
+= sizeof(struct mlx5_wqe_eth_pad
) / 16;
4094 seg
= set_eth_seg(seg
, wr
, qend
, qp
, &size
);
4096 if (unlikely((seg
== qend
)))
4097 seg
= mlx5_get_send_wqe(qp
, 0);
4100 case MLX5_IB_QPT_REG_UMR
:
4101 if (wr
->opcode
!= MLX5_IB_WR_UMR
) {
4103 mlx5_ib_warn(dev
, "bad opcode\n");
4106 qp
->sq
.wr_data
[idx
] = MLX5_IB_WR_UMR
;
4107 ctrl
->imm
= cpu_to_be32(umr_wr(wr
)->mkey
);
4108 set_reg_umr_segment(seg
, wr
, !!(MLX5_CAP_GEN(mdev
, atomic
)));
4109 seg
+= sizeof(struct mlx5_wqe_umr_ctrl_seg
);
4110 size
+= sizeof(struct mlx5_wqe_umr_ctrl_seg
) / 16;
4111 if (unlikely((seg
== qend
)))
4112 seg
= mlx5_get_send_wqe(qp
, 0);
4113 set_reg_mkey_segment(seg
, wr
);
4114 seg
+= sizeof(struct mlx5_mkey_seg
);
4115 size
+= sizeof(struct mlx5_mkey_seg
) / 16;
4116 if (unlikely((seg
== qend
)))
4117 seg
= mlx5_get_send_wqe(qp
, 0);
4124 if (wr
->send_flags
& IB_SEND_INLINE
&& num_sge
) {
4125 int uninitialized_var(sz
);
4127 err
= set_data_inl_seg(qp
, wr
, seg
, &sz
);
4128 if (unlikely(err
)) {
4129 mlx5_ib_warn(dev
, "\n");
4137 for (i
= 0; i
< num_sge
; i
++) {
4138 if (unlikely(dpseg
== qend
)) {
4139 seg
= mlx5_get_send_wqe(qp
, 0);
4142 if (likely(wr
->sg_list
[i
].length
)) {
4143 set_data_ptr_seg(dpseg
, wr
->sg_list
+ i
);
4144 size
+= sizeof(struct mlx5_wqe_data_seg
) / 16;
4150 finish_wqe(qp
, ctrl
, size
, idx
, wr
->wr_id
, nreq
,
4151 get_fence(fence
, wr
), next_fence
,
4152 mlx5_ib_opcode
[wr
->opcode
]);
4155 dump_wqe(qp
, idx
, size
);
4160 qp
->sq
.head
+= nreq
;
4162 /* Make sure that descriptors are written before
4163 * updating doorbell record and ringing the doorbell
4167 qp
->db
.db
[MLX5_SND_DBR
] = cpu_to_be32(qp
->sq
.cur_post
);
4169 /* Make sure doorbell record is visible to the HCA before
4170 * we hit doorbell */
4174 spin_lock(&bf
->lock
);
4176 __acquire(&bf
->lock
);
4179 if (0 && nreq
== 1 && bf
->uuarn
&& inl
&& size
> 1 && size
<= bf
->buf_size
/ 16) {
4180 mlx5_bf_copy(bf
->reg
+ bf
->offset
, (u64
*)ctrl
, ALIGN(size
* 16, 64), qp
);
4183 mlx5_write64((__be32
*)ctrl
, bf
->regreg
+ bf
->offset
,
4184 MLX5_GET_DOORBELL_LOCK(&bf
->lock32
));
4185 /* Make sure doorbells don't leak out of SQ spinlock
4186 * and reach the HCA out of order.
4190 bf
->offset
^= bf
->buf_size
;
4192 spin_unlock(&bf
->lock
);
4194 __release(&bf
->lock
);
4197 spin_unlock_irqrestore(&qp
->sq
.lock
, flags
);
static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)
{
	sig->signature = calc_sig(sig, size);
}
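/*
 * Used by mlx5_ib_post_recv() below when receive-WQE signatures are enabled:
 * the WQE starts with an mlx5_rwqe_sig and the signature is computed over
 * (qp->rq.max_gs + 1) << 2 bytes at the start of the WQE.
 */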
4207 int mlx5_ib_post_recv(struct ib_qp
*ibqp
, struct ib_recv_wr
*wr
,
4208 struct ib_recv_wr
**bad_wr
)
4210 struct mlx5_ib_qp
*qp
= to_mqp(ibqp
);
4211 struct mlx5_wqe_data_seg
*scat
;
4212 struct mlx5_rwqe_sig
*sig
;
4213 struct mlx5_ib_dev
*dev
= to_mdev(ibqp
->device
);
4214 struct mlx5_core_dev
*mdev
= dev
->mdev
;
4215 unsigned long flags
;
4221 if (unlikely(ibqp
->qp_type
== IB_QPT_GSI
))
4222 return mlx5_ib_gsi_post_recv(ibqp
, wr
, bad_wr
);
4224 spin_lock_irqsave(&qp
->rq
.lock
, flags
);
4226 if (mdev
->state
== MLX5_DEVICE_STATE_INTERNAL_ERROR
) {
4233 ind
= qp
->rq
.head
& (qp
->rq
.wqe_cnt
- 1);
4235 for (nreq
= 0; wr
; nreq
++, wr
= wr
->next
) {
4236 if (mlx5_wq_overflow(&qp
->rq
, nreq
, qp
->ibqp
.recv_cq
)) {
4242 if (unlikely(wr
->num_sge
> qp
->rq
.max_gs
)) {
4248 scat
= get_recv_wqe(qp
, ind
);
4252 for (i
= 0; i
< wr
->num_sge
; i
++)
4253 set_data_ptr_seg(scat
+ i
, wr
->sg_list
+ i
);
4255 if (i
< qp
->rq
.max_gs
) {
4256 scat
[i
].byte_count
= 0;
4257 scat
[i
].lkey
= cpu_to_be32(MLX5_INVALID_LKEY
);
4262 sig
= (struct mlx5_rwqe_sig
*)scat
;
4263 set_sig_seg(sig
, (qp
->rq
.max_gs
+ 1) << 2);
4266 qp
->rq
.wrid
[ind
] = wr
->wr_id
;
4268 ind
= (ind
+ 1) & (qp
->rq
.wqe_cnt
- 1);
4273 qp
->rq
.head
+= nreq
;
4275 /* Make sure that descriptors are written before
4280 *qp
->db
.db
= cpu_to_be32(qp
->rq
.head
& 0xffff);
4283 spin_unlock_irqrestore(&qp
->rq
.lock
, flags
);
static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
{
	switch (mlx5_state) {
	case MLX5_QP_STATE_RST:      return IB_QPS_RESET;
	case MLX5_QP_STATE_INIT:     return IB_QPS_INIT;
	case MLX5_QP_STATE_RTR:      return IB_QPS_RTR;
	case MLX5_QP_STATE_RTS:      return IB_QPS_RTS;
	case MLX5_QP_STATE_SQ_DRAINING:
	case MLX5_QP_STATE_SQD:      return IB_QPS_SQD;
	case MLX5_QP_STATE_SQER:     return IB_QPS_SQE;
	case MLX5_QP_STATE_ERR:      return IB_QPS_ERR;
	default:		     return -1;
	}
}

static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state)
{
	switch (mlx5_mig_state) {
	case MLX5_QP_PM_ARMED:		return IB_MIG_ARMED;
	case MLX5_QP_PM_REARM:		return IB_MIG_REARM;
	case MLX5_QP_PM_MIGRATED:	return IB_MIG_MIGRATED;
	default:			return -1;
	}
}

static int to_ib_qp_access_flags(int mlx5_flags)
{
	int ib_flags = 0;

	if (mlx5_flags & MLX5_QP_BIT_RRE)
		ib_flags |= IB_ACCESS_REMOTE_READ;
	if (mlx5_flags & MLX5_QP_BIT_RWE)
		ib_flags |= IB_ACCESS_REMOTE_WRITE;
	if (mlx5_flags & MLX5_QP_BIT_RAE)
		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

	return ib_flags;
}
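/*
 * This is the inverse of to_mlx5_access_flags() earlier in this file: the
 * MLX5_QP_BIT_RRE/RWE/RAE bits read back from the QP context are folded
 * back into the generic IB_ACCESS_* flags reported by query_qp_attr().
 */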
static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
			  struct mlx5_qp_path *path)
{
	struct mlx5_core_dev *dev = ibdev->mdev;

	memset(ib_ah_attr, 0, sizeof(*ib_ah_attr));
	ib_ah_attr->port_num	  = path->port;

	if (ib_ah_attr->port_num == 0 ||
	    ib_ah_attr->port_num > MLX5_CAP_GEN(dev, num_ports))
		return;

	ib_ah_attr->sl = path->dci_cfi_prio_sl & 0xf;

	ib_ah_attr->dlid	  = be16_to_cpu(path->rlid);
	ib_ah_attr->src_path_bits = path->grh_mlid & 0x7f;
	ib_ah_attr->static_rate   = path->static_rate ? path->static_rate - 5 : 0;
	ib_ah_attr->ah_flags      = (path->grh_mlid & (1 << 7)) ? IB_AH_GRH : 0;
	if (ib_ah_attr->ah_flags) {
		ib_ah_attr->grh.sgid_index = path->mgid_index;
		ib_ah_attr->grh.hop_limit  = path->hop_limit;
		ib_ah_attr->grh.traffic_class =
			(be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
		ib_ah_attr->grh.flow_label =
			be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
		memcpy(ib_ah_attr->grh.dgid.raw,
		       path->rgid, sizeof(ib_ah_attr->grh.dgid.raw));
	}
}
4357 static int query_raw_packet_qp_sq_state(struct mlx5_ib_dev
*dev
,
4358 struct mlx5_ib_sq
*sq
,
4366 inlen
= MLX5_ST_SZ_BYTES(query_sq_out
);
4367 out
= mlx5_vzalloc(inlen
);
4371 err
= mlx5_core_query_sq(dev
->mdev
, sq
->base
.mqp
.qpn
, out
);
4375 sqc
= MLX5_ADDR_OF(query_sq_out
, out
, sq_context
);
4376 *sq_state
= MLX5_GET(sqc
, sqc
, state
);
4377 sq
->state
= *sq_state
;
4384 static int query_raw_packet_qp_rq_state(struct mlx5_ib_dev
*dev
,
4385 struct mlx5_ib_rq
*rq
,
4393 inlen
= MLX5_ST_SZ_BYTES(query_rq_out
);
4394 out
= mlx5_vzalloc(inlen
);
4398 err
= mlx5_core_query_rq(dev
->mdev
, rq
->base
.mqp
.qpn
, out
);
4402 rqc
= MLX5_ADDR_OF(query_rq_out
, out
, rq_context
);
4403 *rq_state
= MLX5_GET(rqc
, rqc
, state
);
4404 rq
->state
= *rq_state
;
static int sqrq_state_to_qp_state(u8 sq_state, u8 rq_state,
				  struct mlx5_ib_qp *qp, u8 *qp_state)
{
	static const u8 sqrq_trans[MLX5_RQ_NUM_STATE][MLX5_SQ_NUM_STATE] = {
		[MLX5_RQC_STATE_RST] = {
			[MLX5_SQC_STATE_RST]	= IB_QPS_RESET,
			[MLX5_SQC_STATE_RDY]	= MLX5_QP_STATE_BAD,
			[MLX5_SQC_STATE_ERR]	= MLX5_QP_STATE_BAD,
			[MLX5_SQ_STATE_NA]	= IB_QPS_RESET,
		},
		[MLX5_RQC_STATE_RDY] = {
			[MLX5_SQC_STATE_RST]	= MLX5_QP_STATE_BAD,
			[MLX5_SQC_STATE_RDY]	= MLX5_QP_STATE,
			[MLX5_SQC_STATE_ERR]	= IB_QPS_SQE,
			[MLX5_SQ_STATE_NA]	= MLX5_QP_STATE,
		},
		[MLX5_RQC_STATE_ERR] = {
			[MLX5_SQC_STATE_RST]	= MLX5_QP_STATE_BAD,
			[MLX5_SQC_STATE_RDY]	= MLX5_QP_STATE_BAD,
			[MLX5_SQC_STATE_ERR]	= IB_QPS_ERR,
			[MLX5_SQ_STATE_NA]	= IB_QPS_ERR,
		},
		[MLX5_RQ_STATE_NA] = {
			[MLX5_SQC_STATE_RST]	= IB_QPS_RESET,
			[MLX5_SQC_STATE_RDY]	= MLX5_QP_STATE,
			[MLX5_SQC_STATE_ERR]	= MLX5_QP_STATE,
			[MLX5_SQ_STATE_NA]	= MLX5_QP_STATE_BAD,
		},
	};

	*qp_state = sqrq_trans[rq_state][sq_state];

	if (*qp_state == MLX5_QP_STATE_BAD) {
		WARN(1, "Buggy Raw Packet QP state, SQ 0x%x state: 0x%x, RQ 0x%x state: 0x%x",
		     qp->raw_packet_qp.sq.base.mqp.qpn, sq_state,
		     qp->raw_packet_qp.rq.base.mqp.qpn, rq_state);
		return -EINVAL;
	}

	if (*qp_state == MLX5_QP_STATE)
		*qp_state = qp->state;

	return 0;
}
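/*
 * In sqrq_state_to_qp_state() above, MLX5_QP_STATE means "no change implied
 * by the SQ/RQ pair, keep the cached qp->state", while MLX5_QP_STATE_BAD
 * marks SQ/RQ combinations that should never be observed and triggers the
 * WARN.
 */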
4456 static int query_raw_packet_qp_state(struct mlx5_ib_dev
*dev
,
4457 struct mlx5_ib_qp
*qp
,
4458 u8
*raw_packet_qp_state
)
4460 struct mlx5_ib_raw_packet_qp
*raw_packet_qp
= &qp
->raw_packet_qp
;
4461 struct mlx5_ib_sq
*sq
= &raw_packet_qp
->sq
;
4462 struct mlx5_ib_rq
*rq
= &raw_packet_qp
->rq
;
4464 u8 sq_state
= MLX5_SQ_STATE_NA
;
4465 u8 rq_state
= MLX5_RQ_STATE_NA
;
4467 if (qp
->sq
.wqe_cnt
) {
4468 err
= query_raw_packet_qp_sq_state(dev
, sq
, &sq_state
);
4473 if (qp
->rq
.wqe_cnt
) {
4474 err
= query_raw_packet_qp_rq_state(dev
, rq
, &rq_state
);
4479 return sqrq_state_to_qp_state(sq_state
, rq_state
, qp
,
4480 raw_packet_qp_state
);
static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
			 struct ib_qp_attr *qp_attr)
{
	int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
	struct mlx5_qp_context *context;
	int mlx5_state;
	u32 *outb;
	int err = 0;

	outb = kzalloc(outlen, GFP_KERNEL);
	if (!outb)
		return -ENOMEM;

	err = mlx5_core_qp_query(dev->mdev, &qp->trans_qp.base.mqp, outb,
				 outlen);
	if (err)
		goto out;

	/* FIXME: use MLX5_GET rather than mlx5_qp_context manual struct */
	context = (struct mlx5_qp_context *)MLX5_ADDR_OF(query_qp_out, outb, qpc);

	mlx5_state = be32_to_cpu(context->flags) >> 28;

	qp->state		     = to_ib_qp_state(mlx5_state);
	qp_attr->path_mtu	     = context->mtu_msgmax >> 5;
	qp_attr->path_mig_state	     =
		to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
	qp_attr->qkey		     = be32_to_cpu(context->qkey);
	qp_attr->rq_psn		     = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
	qp_attr->sq_psn		     = be32_to_cpu(context->next_send_psn) & 0xffffff;
	qp_attr->dest_qp_num	     = be32_to_cpu(context->log_pg_sz_remote_qpn) & 0xffffff;
	qp_attr->qp_access_flags     =
		to_ib_qp_access_flags(be32_to_cpu(context->params2));

	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
		to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
		qp_attr->alt_pkey_index =
			be16_to_cpu(context->alt_path.pkey_index);
		qp_attr->alt_port_num	= qp_attr->alt_ah_attr.port_num;
	}

	qp_attr->pkey_index = be16_to_cpu(context->pri_path.pkey_index);
	qp_attr->port_num = context->pri_path.port;

	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
	qp_attr->sq_draining = mlx5_state == MLX5_QP_STATE_SQ_DRAINING;

	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);

	qp_attr->max_dest_rd_atomic =
		1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
	qp_attr->min_rnr_timer	    =
		(be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
	qp_attr->timeout	    = context->pri_path.ackto_lt >> 3;
	qp_attr->retry_cnt	    = (be32_to_cpu(context->params1) >> 16) & 0x7;
	qp_attr->rnr_retry	    = (be32_to_cpu(context->params1) >> 13) & 0x7;
	qp_attr->alt_timeout	    = context->alt_path.ackto_lt >> 3;

out:
	kfree(outb);
	return err;
}
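
/*
 * Sketch only, not part of the driver: addressing the FIXME above, the same
 * fields could be read through the mlx5_ifc accessors instead of the manual
 * mlx5_qp_context struct, e.g.:
 *
 *	void *qpc = MLX5_ADDR_OF(query_qp_out, outb, qpc);
 *
 *	qp->state	  = to_ib_qp_state(MLX5_GET(qpc, qpc, state));
 *	qp_attr->path_mtu = MLX5_GET(qpc, qpc, mtu);
 *	qp_attr->qkey	  = MLX5_GET(qpc, qpc, q_key);
 *	qp_attr->sq_psn	  = MLX5_GET(qpc, qpc, next_send_psn);
 *
 * The qpc field names are assumed to follow the layout in mlx5_ifc.h and
 * would need to be verified against the header revision in use.
 */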
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
		     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	int err = 0;
	u8 raw_packet_qp_state;

	if (ibqp->rwq_ind_tbl)
		return -ENOSYS;

	if (unlikely(ibqp->qp_type == IB_QPT_GSI))
		return mlx5_ib_gsi_query_qp(ibqp, qp_attr, qp_attr_mask,
					    qp_init_attr);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/*
	 * Wait for any outstanding page faults, in case the user frees memory
	 * based upon this query's result.
	 */
	flush_workqueue(mlx5_ib_page_fault_wq);
#endif

	mutex_lock(&qp->mutex);

	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
		err = query_raw_packet_qp_state(dev, qp, &raw_packet_qp_state);
		if (err)
			goto out;
		qp->state = raw_packet_qp_state;
		qp_attr->port_num = 1;
	} else {
		err = query_qp_attr(dev, qp, qp_attr);
		if (err)
			goto out;
	}

	qp_attr->qp_state	     = qp->state;
	qp_attr->cur_qp_state	     = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr     = qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;

	if (!ibqp->uobject) {
		qp_attr->cap.max_send_wr  = qp->sq.max_post;
		qp_attr->cap.max_send_sge = qp->sq.max_gs;
		qp_init_attr->qp_context = ibqp->qp_context;
	} else {
		qp_attr->cap.max_send_wr  = 0;
		qp_attr->cap.max_send_sge = 0;
	}

	qp_init_attr->qp_type = ibqp->qp_type;
	qp_init_attr->recv_cq = ibqp->recv_cq;
	qp_init_attr->send_cq = ibqp->send_cq;
	qp_init_attr->srq = ibqp->srq;
	qp_attr->cap.max_inline_data = qp->max_inline_data;

	qp_init_attr->cap	     = qp_attr->cap;

	qp_init_attr->create_flags = 0;
	if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
		qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;

	if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
		qp_init_attr->create_flags |= IB_QP_CREATE_CROSS_CHANNEL;
	if (qp->flags & MLX5_IB_QP_MANAGED_SEND)
		qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_SEND;
	if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
		qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_RECV;
	if (qp->flags & MLX5_IB_QP_SQPN_QP1)
		qp_init_attr->create_flags |= mlx5_ib_create_qp_sqpn_qp1();

	qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
		IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;

out:
	mutex_unlock(&qp->mutex);
	return err;
}
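
/*
 * Usage sketch (illustrative only): a kernel consumer normally reaches this
 * entry point through the core verbs layer rather than calling it directly,
 * e.g.:
 *
 *	struct ib_qp_attr attr;
 *	struct ib_qp_init_attr init_attr;
 *
 *	int ret = ib_query_qp(qp, &attr, IB_QP_STATE, &init_attr);
 *	if (!ret && attr.qp_state == IB_QPS_ERR)
 *		; * QP needs recovery *
 *
 * Note that this implementation fills the whole attribute structure
 * regardless of qp_attr_mask (the mask is only forwarded for GSI QPs).
 */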
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_xrcd *xrcd;
	int err;

	if (!MLX5_CAP_GEN(dev->mdev, xrc))
		return ERR_PTR(-ENOSYS);

	xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
	if (!xrcd)
		return ERR_PTR(-ENOMEM);

	err = mlx5_core_xrcd_alloc(dev->mdev, &xrcd->xrcdn);
	if (err) {
		kfree(xrcd);
		return ERR_PTR(-ENOMEM);
	}

	return &xrcd->ibxrcd;
}
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
	u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
	int err;

	err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn);
	if (err) {
		mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
		return err;
	}

	kfree(xrcd);

	return 0;
}
static void mlx5_ib_wq_event(struct mlx5_core_qp *core_qp, int type)
{
	struct mlx5_ib_rwq *rwq = to_mibrwq(core_qp);
	struct mlx5_ib_dev *dev = to_mdev(rwq->ibwq.device);
	struct ib_event event;

	if (rwq->ibwq.event_handler) {
		event.device     = rwq->ibwq.device;
		event.element.wq = &rwq->ibwq;
		switch (type) {
		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_WQ_FATAL;
			break;
		default:
			mlx5_ib_warn(dev, "Unexpected event type %d on WQ %06x\n", type, core_qp->qpn);
			return;
		}

		rwq->ibwq.event_handler(&event, rwq->ibwq.wq_context);
	}
}
static int  create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
		      struct ib_wq_init_attr *init_attr)
{
	struct mlx5_ib_dev *dev;
	__be64 *rq_pas0;
	void *in;
	void *rqc;
	void *wq;
	int inlen;
	int err;

	dev = to_mdev(pd->device);

	inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rwq->rq_num_pas;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	MLX5_SET(rqc,  rqc, mem_rq_type,
		 MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
	MLX5_SET(rqc, rqc, user_index, rwq->user_index);
	MLX5_SET(rqc,  rqc, cqn, to_mcq(init_attr->cq)->mcq.cqn);
	MLX5_SET(rqc,  rqc, state, MLX5_RQC_STATE_RST);
	MLX5_SET(rqc,  rqc, flush_in_error_en, 1);
	wq = MLX5_ADDR_OF(rqc, rqc, wq);
	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
	MLX5_SET(wq, wq, log_wq_stride, rwq->log_rq_stride);
	MLX5_SET(wq, wq, log_wq_sz, rwq->log_rq_size);
	MLX5_SET(wq, wq, pd, to_mpd(pd)->pdn);
	MLX5_SET(wq, wq, page_offset, rwq->rq_page_offset);
	MLX5_SET(wq, wq, log_wq_pg_sz, rwq->log_page_size);
	MLX5_SET(wq, wq, wq_signature, rwq->wq_sig);
	MLX5_SET64(wq, wq, dbr_addr, rwq->db.dma);
	rq_pas0 = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
	mlx5_ib_populate_pas(dev, rwq->umem, rwq->page_shift, rq_pas0, 0);
	err = mlx5_core_create_rq_tracked(dev->mdev, in, inlen, &rwq->core_qp);

	kvfree(in);
	return err;
}
static int set_user_rq_size(struct mlx5_ib_dev *dev,
			    struct ib_wq_init_attr *wq_init_attr,
			    struct mlx5_ib_create_wq *ucmd,
			    struct mlx5_ib_rwq *rwq)
{
	/* Sanity check RQ size before proceeding */
	if (wq_init_attr->max_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_wq_sz)))
		return -EINVAL;

	if (!ucmd->rq_wqe_count)
		return -EINVAL;

	rwq->wqe_count = ucmd->rq_wqe_count;
	rwq->wqe_shift = ucmd->rq_wqe_shift;
	rwq->buf_size = (rwq->wqe_count << rwq->wqe_shift);
	rwq->log_rq_stride = rwq->wqe_shift;
	rwq->log_rq_size = ilog2(rwq->wqe_count);
	return 0;
}
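
/*
 * Worked example (illustrative values only): with ucmd->rq_wqe_count = 256
 * and ucmd->rq_wqe_shift = 6 (64-byte strides), the function above derives
 * buf_size = 256 << 6 = 16384 bytes, log_rq_stride = 6 and
 * log_rq_size = ilog2(256) = 8, which are later programmed into the RQ
 * context by create_rq().
 */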
static int prepare_user_rq(struct ib_pd *pd,
			   struct ib_wq_init_attr *init_attr,
			   struct ib_udata *udata,
			   struct mlx5_ib_rwq *rwq)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_create_wq ucmd = {};
	int err;
	size_t required_cmd_sz;

	required_cmd_sz = offsetof(typeof(ucmd), reserved) + sizeof(ucmd.reserved);
	if (udata->inlen < required_cmd_sz) {
		mlx5_ib_dbg(dev, "invalid inlen\n");
		return -EINVAL;
	}

	if (udata->inlen > sizeof(ucmd) &&
	    !ib_is_udata_cleared(udata, sizeof(ucmd),
				 udata->inlen - sizeof(ucmd))) {
		mlx5_ib_dbg(dev, "inlen is not supported\n");
		return -EOPNOTSUPP;
	}

	if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
		mlx5_ib_dbg(dev, "copy failed\n");
		return -EFAULT;
	}

	if (ucmd.comp_mask) {
		mlx5_ib_dbg(dev, "invalid comp mask\n");
		return -EOPNOTSUPP;
	}

	if (ucmd.reserved) {
		mlx5_ib_dbg(dev, "invalid reserved\n");
		return -EOPNOTSUPP;
	}

	err = set_user_rq_size(dev, init_attr, &ucmd, rwq);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		return err;
	}

	err = create_user_rq(dev, pd, rwq, &ucmd);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		return err;
	}

	rwq->user_index = ucmd.user_index;
	return 0;
}
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev;
	struct mlx5_ib_rwq *rwq;
	struct mlx5_ib_create_wq_resp resp = {};
	size_t min_resp_len;
	int err;

	if (!udata)
		return ERR_PTR(-ENOSYS);

	min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
	if (udata->outlen && udata->outlen < min_resp_len)
		return ERR_PTR(-EINVAL);

	dev = to_mdev(pd->device);
	switch (init_attr->wq_type) {
	case IB_WQT_RQ:
		rwq = kzalloc(sizeof(*rwq), GFP_KERNEL);
		if (!rwq)
			return ERR_PTR(-ENOMEM);
		err = prepare_user_rq(pd, init_attr, udata, rwq);
		if (err)
			goto err;
		err = create_rq(rwq, pd, init_attr);
		if (err)
			goto err_user_rq;
		break;
	default:
		mlx5_ib_dbg(dev, "unsupported wq type %d\n",
			    init_attr->wq_type);
		return ERR_PTR(-EINVAL);
	}

	rwq->ibwq.wq_num = rwq->core_qp.qpn;
	rwq->ibwq.state = IB_WQS_RESET;
	if (udata->outlen) {
		resp.response_length = offsetof(typeof(resp), response_length) +
				sizeof(resp.response_length);
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
		if (err)
			goto err_copy;
	}

	rwq->core_qp.event = mlx5_ib_wq_event;
	rwq->ibwq.event_handler = init_attr->event_handler;
	return &rwq->ibwq;

err_copy:
	mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
err_user_rq:
	destroy_user_rq(pd, rwq);
err:
	kfree(rwq);
	return ERR_PTR(err);
}
int mlx5_ib_destroy_wq(struct ib_wq *wq)
{
	struct mlx5_ib_dev *dev = to_mdev(wq->device);
	struct mlx5_ib_rwq *rwq = to_mrwq(wq);

	mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
	destroy_user_rq(wq->pd, rwq);
	kfree(rwq);

	return 0;
}
struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
						      struct ib_rwq_ind_table_init_attr *init_attr,
						      struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	struct mlx5_ib_rwq_ind_table *rwq_ind_tbl;
	int sz = 1 << init_attr->log_ind_tbl_size;
	struct mlx5_ib_create_rwq_ind_tbl_resp resp = {};
	size_t min_resp_len;
	int inlen;
	int err;
	int i;
	u32 *in;
	void *rqtc;

	if (udata->inlen > 0 &&
	    !ib_is_udata_cleared(udata, 0,
				 udata->inlen))
		return ERR_PTR(-EOPNOTSUPP);

	if (init_attr->log_ind_tbl_size >
	    MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)) {
		mlx5_ib_dbg(dev, "log_ind_tbl_size = %d is bigger than supported = %d\n",
			    init_attr->log_ind_tbl_size,
			    MLX5_CAP_GEN(dev->mdev, log_max_rqt_size));
		return ERR_PTR(-EINVAL);
	}

	min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
	if (udata->outlen && udata->outlen < min_resp_len)
		return ERR_PTR(-EINVAL);

	rwq_ind_tbl = kzalloc(sizeof(*rwq_ind_tbl), GFP_KERNEL);
	if (!rwq_ind_tbl)
		return ERR_PTR(-ENOMEM);

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = mlx5_vzalloc(inlen);
	if (!in) {
		err = -ENOMEM;
		goto err;
	}

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	for (i = 0; i < sz; i++)
		MLX5_SET(rqtc, rqtc, rq_num[i], init_attr->ind_tbl[i]->wq_num);

	err = mlx5_core_create_rqt(dev->mdev, in, inlen, &rwq_ind_tbl->rqtn);
	kvfree(in);

	if (err)
		goto err;

	rwq_ind_tbl->ib_rwq_ind_tbl.ind_tbl_num = rwq_ind_tbl->rqtn;
	if (udata->outlen) {
		resp.response_length = offsetof(typeof(resp), response_length) +
					sizeof(resp.response_length);
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
		if (err)
			goto err_copy;
	}

	return &rwq_ind_tbl->ib_rwq_ind_tbl;

err_copy:
	mlx5_core_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn);
err:
	kfree(rwq_ind_tbl);
	return ERR_PTR(err);
}
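
/*
 * Sizing note (illustrative): for init_attr->log_ind_tbl_size = 3 the table
 * above holds sz = 1 << 3 = 8 entries, so the command buffer is
 * MLX5_ST_SZ_BYTES(create_rqt_in) + 8 * sizeof(u32) bytes and rq_num[0..7]
 * are filled with the WQ numbers of the supplied WQs.
 */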
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
	struct mlx5_ib_rwq_ind_table *rwq_ind_tbl = to_mrwq_ind_table(ib_rwq_ind_tbl);
	struct mlx5_ib_dev *dev = to_mdev(ib_rwq_ind_tbl->device);

	mlx5_core_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn);

	kfree(rwq_ind_tbl);
	return 0;
}
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(wq->device);
	struct mlx5_ib_rwq *rwq = to_mrwq(wq);
	struct mlx5_ib_modify_wq ucmd = {};
	size_t required_cmd_sz;
	int curr_wq_state;
	int wq_state;
	int inlen;
	int err;
	void *rqc;
	void *in;

	required_cmd_sz = offsetof(typeof(ucmd), reserved) + sizeof(ucmd.reserved);
	if (udata->inlen < required_cmd_sz)
		return -EINVAL;

	if (udata->inlen > sizeof(ucmd) &&
	    !ib_is_udata_cleared(udata, sizeof(ucmd),
				 udata->inlen - sizeof(ucmd)))
		return -EOPNOTSUPP;

	if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)))
		return -EFAULT;

	if (ucmd.comp_mask || ucmd.reserved)
		return -EOPNOTSUPP;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	curr_wq_state = (wq_attr_mask & IB_WQ_CUR_STATE) ?
		wq_attr->curr_wq_state : wq->state;
	wq_state = (wq_attr_mask & IB_WQ_STATE) ?
		wq_attr->wq_state : curr_wq_state;
	if (curr_wq_state == IB_WQS_ERR)
		curr_wq_state = MLX5_RQC_STATE_ERR;
	if (wq_state == IB_WQS_ERR)
		wq_state = MLX5_RQC_STATE_ERR;
	MLX5_SET(modify_rq_in, in, rq_state, curr_wq_state);
	MLX5_SET(rqc, rqc, state, wq_state);

	err = mlx5_core_modify_rq(dev->mdev, rwq->core_qp.qpn, in, inlen);
	kvfree(in);
	if (!err)
		rwq->ibwq.state = (wq_state == MLX5_RQC_STATE_ERR) ? IB_WQS_ERR : wq_state;