/*
 * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <rdma/ib_umem.h>
#include "mlx5_ib.h"
#include "user.h"

/* not supported currently */
static int wq_signature;

enum {
	MLX5_IB_ACK_REQ_FREQ	= 8,
};

enum {
	MLX5_IB_DEFAULT_SCHED_QUEUE	= 0x83,
	MLX5_IB_DEFAULT_QP0_SCHED_QUEUE	= 0x3f,
	MLX5_IB_LINK_TYPE_IB		= 0,
	MLX5_IB_LINK_TYPE_ETH		= 1
};

enum {
	MLX5_IB_SQ_STRIDE	= 6,
	MLX5_IB_CACHE_LINE_SIZE	= 64,
};
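
/*
 * Translation from IB work request opcodes to the opcodes written into the
 * WQE control segment.  Local invalidate, fast registration and the
 * driver-internal UMR work request all map to the UMR opcode.
 */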
static const u32 mlx5_ib_opcode[] = {
	[IB_WR_SEND]				= MLX5_OPCODE_SEND,
	[IB_WR_SEND_WITH_IMM]			= MLX5_OPCODE_SEND_IMM,
	[IB_WR_RDMA_WRITE]			= MLX5_OPCODE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM]		= MLX5_OPCODE_RDMA_WRITE_IMM,
	[IB_WR_RDMA_READ]			= MLX5_OPCODE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP]		= MLX5_OPCODE_ATOMIC_CS,
	[IB_WR_ATOMIC_FETCH_AND_ADD]		= MLX5_OPCODE_ATOMIC_FA,
	[IB_WR_SEND_WITH_INV]			= MLX5_OPCODE_SEND_INVAL,
	[IB_WR_LOCAL_INV]			= MLX5_OPCODE_UMR,
	[IB_WR_FAST_REG_MR]			= MLX5_OPCODE_UMR,
	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP]	= MLX5_OPCODE_ATOMIC_MASKED_CS,
	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]	= MLX5_OPCODE_ATOMIC_MASKED_FA,
	[MLX5_IB_WR_UMR]			= MLX5_OPCODE_UMR,
};
static int is_qp0(enum ib_qp_type qp_type)
{
	return qp_type == IB_QPT_SMI;
}

static int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == IB_QPT_GSI;
}

static int is_sqp(enum ib_qp_type qp_type)
{
	return is_qp0(qp_type) || is_qp1(qp_type);
}

static void *get_wqe(struct mlx5_ib_qp *qp, int offset)
{
	return mlx5_buf_offset(&qp->buf, offset);
}

static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
}

void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));
}
/**
 * mlx5_ib_read_user_wqe() - Copy a user-space WQE to kernel space.
 *
 * @qp: QP to copy from.
 * @send: copy from the send queue when non-zero, use the receive queue
 *	  otherwise.
 * @wqe_index:  index to start copying from. For send work queues, the
 *		wqe_index is in units of MLX5_SEND_WQE_BB.
 *		For receive work queue, it is the number of work queue
 *		element in the queue.
 * @buffer: destination buffer.
 * @length: maximum number of bytes to copy.
 *
 * Copies at least a single WQE, but may copy more data.
 *
 * Return: the number of bytes copied, or an error code.
 */
int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
			  void *buffer, u32 length)
{
	struct ib_device *ibdev = qp->ibqp.device;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_wq *wq = send ? &qp->sq : &qp->rq;
	size_t offset;
	size_t wq_end;
	struct ib_umem *umem = qp->umem;
	u32 first_copy_length;
	int wqe_length;
	int ret;

	if (wq->wqe_cnt == 0) {
		mlx5_ib_dbg(dev, "mlx5_ib_read_user_wqe for a QP with wqe_cnt == 0. qp_type: 0x%x\n",
			    qp->ibqp.qp_type);
		return -EINVAL;
	}

	offset = wq->offset + ((wqe_index % wq->wqe_cnt) << wq->wqe_shift);
	wq_end = wq->offset + (wq->wqe_cnt << wq->wqe_shift);

	if (send && length < sizeof(struct mlx5_wqe_ctrl_seg))
		return -EINVAL;

	if (offset > umem->length ||
	    (send && offset + sizeof(struct mlx5_wqe_ctrl_seg) > umem->length))
		return -EINVAL;

	first_copy_length = min_t(u32, offset + length, wq_end) - offset;
	ret = ib_umem_copy_from(buffer, umem, offset, first_copy_length);
	if (ret)
		return ret;

	if (send) {
		struct mlx5_wqe_ctrl_seg *ctrl = buffer;
		int ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;

		wqe_length = ds * MLX5_WQE_DS_UNITS;
	} else {
		wqe_length = 1 << wq->wqe_shift;
	}

	if (wqe_length <= first_copy_length)
		return first_copy_length;

	ret = ib_umem_copy_from(buffer + first_copy_length, umem, wq->offset,
				wqe_length - first_copy_length);
	if (ret)
		return ret;

	return wqe_length;
}
static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
{
	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
	struct ib_event event;

	if (type == MLX5_EVENT_TYPE_PATH_MIG)
		to_mibqp(qp)->port = to_mibqp(qp)->alt_port;

	if (ibqp->event_handler) {
		event.device     = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case MLX5_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case MLX5_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case MLX5_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn);
			return;
		}

		ibqp->event_handler(&event, ibqp->qp_context);
	}
}
static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
		       int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
{
	struct mlx5_general_caps *gen;
	int wqe_size;
	int wq_size;

	gen = &dev->mdev->caps.gen;
	/* Sanity check RQ size before proceeding */
	if (cap->max_recv_wr > gen->max_wqes)
		return -EINVAL;

	if (!has_rq) {
		qp->rq.max_gs = 0;
		qp->rq.wqe_cnt = 0;
		qp->rq.wqe_shift = 0;
	} else {
		if (ucmd) {
			qp->rq.wqe_cnt = ucmd->rq_wqe_count;
			qp->rq.wqe_shift = ucmd->rq_wqe_shift;
			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
			qp->rq.max_post = qp->rq.wqe_cnt;
		} else {
			wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0;
			wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
			wqe_size = roundup_pow_of_two(wqe_size);
			wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
			wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
			qp->rq.wqe_cnt = wq_size / wqe_size;
			if (wqe_size > gen->max_rq_desc_sz) {
				mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
					    wqe_size,
					    gen->max_rq_desc_sz);
				return -EINVAL;
			}
			qp->rq.wqe_shift = ilog2(wqe_size);
			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
			qp->rq.max_post = qp->rq.wqe_cnt;
		}
	}

	return 0;
}
static int sq_overhead(enum ib_qp_type qp_type)
{
	int size = 0;

	switch (qp_type) {
	case IB_QPT_XRC_INI:
		size += sizeof(struct mlx5_wqe_xrc_seg);
		/* fall through */
	case IB_QPT_RC:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_atomic_seg) +
			sizeof(struct mlx5_wqe_raddr_seg);
		break;

	case IB_QPT_XRC_TGT:
		return 0;

	case IB_QPT_UC:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_raddr_seg) +
			sizeof(struct mlx5_wqe_umr_ctrl_seg) +
			sizeof(struct mlx5_mkey_seg);
		break;

	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_datagram_seg);
		break;

	case MLX5_IB_QPT_REG_UMR:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_umr_ctrl_seg) +
			sizeof(struct mlx5_mkey_seg);
		break;

	default:
		return -EINVAL;
	}

	return size;
}
static int calc_send_wqe(struct ib_qp_init_attr *attr)
{
	int inl_size = 0;
	int size;

	size = sq_overhead(attr->qp_type);
	if (size < 0)
		return size;

	if (attr->cap.max_inline_data) {
		inl_size = size + sizeof(struct mlx5_wqe_inline_seg) +
			attr->cap.max_inline_data;
	}

	size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);
	if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN &&
	    ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB) < MLX5_SIG_WQE_SIZE)
		return MLX5_SIG_WQE_SIZE;
	else
		return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
}
static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
			struct mlx5_ib_qp *qp)
{
	struct mlx5_general_caps *gen;
	int wqe_size;
	int wq_size;

	gen = &dev->mdev->caps.gen;
	if (!attr->cap.max_send_wr)
		return 0;

	wqe_size = calc_send_wqe(attr);
	mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size);
	if (wqe_size < 0)
		return wqe_size;

	if (wqe_size > gen->max_sq_desc_sz) {
		mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
			    wqe_size, gen->max_sq_desc_sz);
		return -EINVAL;
	}

	qp->max_inline_data = wqe_size - sq_overhead(attr->qp_type) -
		sizeof(struct mlx5_wqe_inline_seg);
	attr->cap.max_inline_data = qp->max_inline_data;

	if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN)
		qp->signature_en = true;

	wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
	qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
	if (qp->sq.wqe_cnt > gen->max_wqes) {
		mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
			    qp->sq.wqe_cnt, gen->max_wqes);
		return -ENOMEM;
	}
	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
	qp->sq.max_gs = attr->cap.max_send_sge;
	qp->sq.max_post = wq_size / wqe_size;
	attr->cap.max_send_wr = qp->sq.max_post;

	return wq_size;
}
static int set_user_buf_size(struct mlx5_ib_dev *dev,
			     struct mlx5_ib_qp *qp,
			     struct mlx5_ib_create_qp *ucmd)
{
	struct mlx5_general_caps *gen;
	int desc_sz = 1 << qp->sq.wqe_shift;

	gen = &dev->mdev->caps.gen;
	if (desc_sz > gen->max_sq_desc_sz) {
		mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
			     desc_sz, gen->max_sq_desc_sz);
		return -EINVAL;
	}

	if (ucmd->sq_wqe_count && ((1 << ilog2(ucmd->sq_wqe_count)) != ucmd->sq_wqe_count)) {
		mlx5_ib_warn(dev, "sq_wqe_count %d, sq_wqe_count %d\n",
			     ucmd->sq_wqe_count, ucmd->sq_wqe_count);
		return -EINVAL;
	}

	qp->sq.wqe_cnt = ucmd->sq_wqe_count;

	if (qp->sq.wqe_cnt > gen->max_wqes) {
		mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
			     qp->sq.wqe_cnt, gen->max_wqes);
		return -EINVAL;
	}

	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
		(qp->sq.wqe_cnt << 6);

	return 0;
}
static int qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI ||
	    attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
	    attr->qp_type == MLX5_IB_QPT_REG_UMR ||
	    !attr->cap.max_recv_wr)
		return 0;

	return 1;
}
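
/*
 * The helpers below hand out uuars (micro UARs, i.e. doorbell/BlueFlame
 * registers carved out of the context's UAR pages) by latency class:
 * medium-class uuars are shared and chosen by lowest reference count,
 * while high-class uuars are claimed exclusively through the bitmap.
 */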
static int first_med_uuar(void)
{
	return 1;
}

static int next_uuar(int n)
{
	n++;

	while (((n % 4) & 2))
		n++;

	return n;
}

static int num_med_uuar(struct mlx5_uuar_info *uuari)
{
	int n;

	n = uuari->num_uars * MLX5_NON_FP_BF_REGS_PER_PAGE -
		uuari->num_low_latency_uuars - 1;

	return n >= 0 ? n : 0;
}

static int max_uuari(struct mlx5_uuar_info *uuari)
{
	return uuari->num_uars * 4;
}

static int first_hi_uuar(struct mlx5_uuar_info *uuari)
{
	int med;
	int i;
	int t;

	med = num_med_uuar(uuari);
	for (t = 0, i = first_med_uuar();; i = next_uuar(i)) {
		t++;
		if (t == med)
			return next_uuar(i);
	}
}
static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
{
	int i;

	for (i = first_hi_uuar(uuari); i < max_uuari(uuari); i = next_uuar(i)) {
		if (!test_bit(i, uuari->bitmap)) {
			set_bit(i, uuari->bitmap);
			uuari->count[i]++;
			return i;
		}
	}

	return -ENOMEM;
}

static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari)
{
	int minidx = first_med_uuar();
	int i;

	for (i = first_med_uuar(); i < first_hi_uuar(uuari); i = next_uuar(i)) {
		if (uuari->count[i] < uuari->count[minidx])
			minidx = i;
	}

	uuari->count[minidx]++;
	return minidx;
}
static int alloc_uuar(struct mlx5_uuar_info *uuari,
		      enum mlx5_ib_latency_class lat)
{
	int uuarn = -EINVAL;

	mutex_lock(&uuari->lock);
	switch (lat) {
	case MLX5_IB_LATENCY_CLASS_LOW:
		uuarn = 0;
		uuari->count[uuarn]++;
		break;

	case MLX5_IB_LATENCY_CLASS_MEDIUM:
		if (uuari->ver < 2)
			uuarn = -ENOMEM;
		else
			uuarn = alloc_med_class_uuar(uuari);
		break;

	case MLX5_IB_LATENCY_CLASS_HIGH:
		if (uuari->ver < 2)
			uuarn = -ENOMEM;
		else
			uuarn = alloc_high_class_uuar(uuari);
		break;

	case MLX5_IB_LATENCY_CLASS_FAST_PATH:
		uuarn = 2;
		break;
	}
	mutex_unlock(&uuari->lock);

	return uuarn;
}
static void free_med_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
	clear_bit(uuarn, uuari->bitmap);
	--uuari->count[uuarn];
}

static void free_high_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
	clear_bit(uuarn, uuari->bitmap);
	--uuari->count[uuarn];
}

static void free_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
	int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
	int high_uuar = nuuars - uuari->num_low_latency_uuars;

	mutex_lock(&uuari->lock);
	if (uuarn == 0) {
		--uuari->count[uuarn];
		goto out;
	}

	if (uuarn < high_uuar) {
		free_med_class_uuar(uuari, uuarn);
		goto out;
	}

	free_high_class_uuar(uuari, uuarn);

out:
	mutex_unlock(&uuari->lock);
}
static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:	return MLX5_QP_STATE_RST;
	case IB_QPS_INIT:	return MLX5_QP_STATE_INIT;
	case IB_QPS_RTR:	return MLX5_QP_STATE_RTR;
	case IB_QPS_RTS:	return MLX5_QP_STATE_RTS;
	case IB_QPS_SQD:	return MLX5_QP_STATE_SQD;
	case IB_QPS_SQE:	return MLX5_QP_STATE_SQER;
	case IB_QPS_ERR:	return MLX5_QP_STATE_ERR;
	default:		return -1;
	}
}
static int to_mlx5_st(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_RC:			return MLX5_QP_ST_RC;
	case IB_QPT_UC:			return MLX5_QP_ST_UC;
	case IB_QPT_UD:			return MLX5_QP_ST_UD;
	case MLX5_IB_QPT_REG_UMR:	return MLX5_QP_ST_REG_UMR;
	case IB_QPT_XRC_INI:
	case IB_QPT_XRC_TGT:		return MLX5_QP_ST_XRC;
	case IB_QPT_SMI:		return MLX5_QP_ST_QP0;
	case IB_QPT_GSI:		return MLX5_QP_ST_QP1;
	case IB_QPT_RAW_IPV6:		return MLX5_QP_ST_RAW_IPV6;
	case IB_QPT_RAW_ETHERTYPE:	return MLX5_QP_ST_RAW_ETHERTYPE;
	case IB_QPT_RAW_PACKET:
	default:			return -EINVAL;
	}
}
static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn)
{
	return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index;
}
static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			  struct mlx5_ib_qp *qp, struct ib_udata *udata,
			  struct mlx5_create_qp_mbox_in **in,
			  struct mlx5_ib_create_qp_resp *resp, int *inlen)
{
	struct mlx5_ib_ucontext *context;
	struct mlx5_ib_create_qp ucmd;
	int page_shift = 0;
	int uar_index;
	int npages;
	u32 offset = 0;
	int uuarn;
	int ncont = 0;
	int err;

	err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
	if (err) {
		mlx5_ib_dbg(dev, "copy failed\n");
		return err;
	}

	context = to_mucontext(pd->uobject->context);
	/*
	 * TBD: should come from the verbs when we have the API
	 */
	uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH);
	if (uuarn < 0) {
		mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n");
		mlx5_ib_dbg(dev, "reverting to medium latency\n");
		uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_MEDIUM);
		if (uuarn < 0) {
			mlx5_ib_dbg(dev, "failed to allocate medium latency UUAR\n");
			mlx5_ib_dbg(dev, "reverting to high latency\n");
			uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW);
			if (uuarn < 0) {
				mlx5_ib_warn(dev, "uuar allocation failed\n");
				return uuarn;
			}
		}
	}

	uar_index = uuarn_to_uar_index(&context->uuari, uuarn);
	mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index);

	qp->rq.offset = 0;
	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;

	err = set_user_buf_size(dev, qp, &ucmd);
	if (err)
		goto err_uuar;

	if (ucmd.buf_addr && qp->buf_size) {
		qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
				       qp->buf_size, 0, 0);
		if (IS_ERR(qp->umem)) {
			mlx5_ib_dbg(dev, "umem_get failed\n");
			err = PTR_ERR(qp->umem);
			goto err_uuar;
		}
	} else {
		qp->umem = NULL;
	}

	if (qp->umem) {
		mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift,
				   &ncont, NULL);
		err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset);
		if (err) {
			mlx5_ib_warn(dev, "bad offset\n");
			goto err_umem;
		}
		mlx5_ib_dbg(dev, "addr 0x%llx, size %d, npages %d, page_shift %d, ncont %d, offset %d\n",
			    ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset);
	}

	*inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
	*in = mlx5_vzalloc(*inlen);
	if (!*in) {
		err = -ENOMEM;
		goto err_umem;
	}
	if (qp->umem)
		mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0);
	(*in)->ctx.log_pg_sz_remote_qpn =
		cpu_to_be32((page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
	(*in)->ctx.params2 = cpu_to_be32(offset << 6);

	(*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
	resp->uuar_index = uuarn;
	qp->uuarn = uuarn;

	err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db);
	if (err) {
		mlx5_ib_dbg(dev, "map failed\n");
		goto err_free;
	}

	err = ib_copy_to_udata(udata, resp, sizeof(*resp));
	if (err) {
		mlx5_ib_dbg(dev, "copy failed\n");
		goto err_unmap;
	}
	qp->create_type = MLX5_QP_USER;

	return 0;

err_unmap:
	mlx5_ib_db_unmap_user(context, &qp->db);

err_free:
	kvfree(*in);

err_umem:
	if (qp->umem)
		ib_umem_release(qp->umem);

err_uuar:
	free_uuar(&context->uuari, uuarn);
	return err;
}
static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_ucontext *context;

	context = to_mucontext(pd->uobject->context);
	mlx5_ib_db_unmap_user(context, &qp->db);
	if (qp->umem)
		ib_umem_release(qp->umem);
	free_uuar(&context->uuari, qp->uuarn);
}
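
/*
 * Kernel-owned QPs get their work queue buffer from mlx5_buf_alloc(), a
 * doorbell record from mlx5_db_alloc() and a BlueFlame register from the
 * device's own uuari pool (fast-path class for the internal UMR QP), in
 * contrast to create_user_qp() which maps memory supplied by user space.
 */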
static int create_kernel_qp(struct mlx5_ib_dev *dev,
			    struct ib_qp_init_attr *init_attr,
			    struct mlx5_ib_qp *qp,
			    struct mlx5_create_qp_mbox_in **in, int *inlen)
{
	enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW;
	struct mlx5_uuar_info *uuari;
	int uar_index;
	int uuarn;
	int err;

	uuari = &dev->mdev->priv.uuari;
	if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK))
		return -EINVAL;

	if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
		lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;

	uuarn = alloc_uuar(uuari, lc);
	if (uuarn < 0) {
		mlx5_ib_dbg(dev, "\n");
		return -ENOMEM;
	}

	qp->bf = &uuari->bfs[uuarn];
	uar_index = qp->bf->uar->index;

	err = calc_sq_size(dev, init_attr, qp);
	if (err < 0) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto err_uuar;
	}

	qp->rq.offset = 0;
	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
	qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);

	err = mlx5_buf_alloc(dev->mdev, qp->buf_size, PAGE_SIZE * 2, &qp->buf);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto err_uuar;
	}

	qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
	*inlen = sizeof(**in) + sizeof(*(*in)->pas) * qp->buf.npages;
	*in = mlx5_vzalloc(*inlen);
	if (!*in) {
		err = -ENOMEM;
		goto err_buf;
	}
	(*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
	(*in)->ctx.log_pg_sz_remote_qpn =
		cpu_to_be32((qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
	/* Set "fast registration enabled" for all kernel QPs */
	(*in)->ctx.params1 |= cpu_to_be32(1 << 11);
	(*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4);

	mlx5_fill_page_array(&qp->buf, (*in)->pas);

	err = mlx5_db_alloc(dev->mdev, &qp->db);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto err_free;
	}

	qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid), GFP_KERNEL);
	qp->sq.wr_data = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data), GFP_KERNEL);
	qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(*qp->rq.wrid), GFP_KERNEL);
	qp->sq.w_list = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.w_list), GFP_KERNEL);
	qp->sq.wqe_head = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wqe_head), GFP_KERNEL);

	if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
	    !qp->sq.w_list || !qp->sq.wqe_head) {
		err = -ENOMEM;
		goto err_wrid;
	}
	qp->create_type = MLX5_QP_KERNEL;

	return 0;

err_wrid:
	mlx5_db_free(dev->mdev, &qp->db);
	kfree(qp->sq.wqe_head);
	kfree(qp->sq.w_list);
	kfree(qp->sq.wrid);
	kfree(qp->sq.wr_data);
	kfree(qp->rq.wrid);

err_free:
	kvfree(*in);

err_buf:
	mlx5_buf_free(dev->mdev, &qp->buf);

err_uuar:
	free_uuar(&dev->mdev->priv.uuari, uuarn);
	return err;
}
static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
	mlx5_db_free(dev->mdev, &qp->db);
	kfree(qp->sq.wqe_head);
	kfree(qp->sq.w_list);
	kfree(qp->sq.wrid);
	kfree(qp->sq.wr_data);
	kfree(qp->rq.wrid);
	mlx5_buf_free(dev->mdev, &qp->buf);
	free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn);
}
static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
{
	if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
	    (attr->qp_type == IB_QPT_XRC_INI))
		return cpu_to_be32(MLX5_SRQ_RQ);
	else if (!qp->has_rq)
		return cpu_to_be32(MLX5_ZERO_LEN_RQ);
	else
		return cpu_to_be32(MLX5_NON_ZERO_RQ);
}
static int is_connected(enum ib_qp_type qp_type)
{
	if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC)
		return 1;

	return 0;
}
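
/*
 * create_qp_common() does the work shared by all QP types: it sizes the
 * receive queue, delegates buffer setup to create_user_qp() or
 * create_kernel_qp() depending on whether the PD carries a user context,
 * fills the QP context mailbox and finally calls mlx5_core_create_qp().
 */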
867 static int create_qp_common(struct mlx5_ib_dev
*dev
, struct ib_pd
*pd
,
868 struct ib_qp_init_attr
*init_attr
,
869 struct ib_udata
*udata
, struct mlx5_ib_qp
*qp
)
871 struct mlx5_ib_resources
*devr
= &dev
->devr
;
872 struct mlx5_ib_create_qp_resp resp
;
873 struct mlx5_create_qp_mbox_in
*in
;
874 struct mlx5_general_caps
*gen
;
875 struct mlx5_ib_create_qp ucmd
;
876 int inlen
= sizeof(*in
);
879 mlx5_ib_odp_create_qp(qp
);
881 gen
= &dev
->mdev
->caps
.gen
;
882 mutex_init(&qp
->mutex
);
883 spin_lock_init(&qp
->sq
.lock
);
884 spin_lock_init(&qp
->rq
.lock
);
886 if (init_attr
->create_flags
& IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK
) {
887 if (!(gen
->flags
& MLX5_DEV_CAP_FLAG_BLOCK_MCAST
)) {
888 mlx5_ib_dbg(dev
, "block multicast loopback isn't supported\n");
891 qp
->flags
|= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK
;
895 if (init_attr
->sq_sig_type
== IB_SIGNAL_ALL_WR
)
896 qp
->sq_signal_bits
= MLX5_WQE_CTRL_CQ_UPDATE
;
898 if (pd
&& pd
->uobject
) {
899 if (ib_copy_from_udata(&ucmd
, udata
, sizeof(ucmd
))) {
900 mlx5_ib_dbg(dev
, "copy failed\n");
904 qp
->wq_sig
= !!(ucmd
.flags
& MLX5_QP_FLAG_SIGNATURE
);
905 qp
->scat_cqe
= !!(ucmd
.flags
& MLX5_QP_FLAG_SCATTER_CQE
);
907 qp
->wq_sig
= !!wq_signature
;
910 qp
->has_rq
= qp_has_rq(init_attr
);
911 err
= set_rq_size(dev
, &init_attr
->cap
, qp
->has_rq
,
912 qp
, (pd
&& pd
->uobject
) ? &ucmd
: NULL
);
914 mlx5_ib_dbg(dev
, "err %d\n", err
);
920 mlx5_ib_dbg(dev
, "requested sq_wqe_count (%d)\n", ucmd
.sq_wqe_count
);
921 if (ucmd
.rq_wqe_shift
!= qp
->rq
.wqe_shift
||
922 ucmd
.rq_wqe_count
!= qp
->rq
.wqe_cnt
) {
923 mlx5_ib_dbg(dev
, "invalid rq params\n");
926 if (ucmd
.sq_wqe_count
> gen
->max_wqes
) {
927 mlx5_ib_dbg(dev
, "requested sq_wqe_count (%d) > max allowed (%d)\n",
928 ucmd
.sq_wqe_count
, gen
->max_wqes
);
931 err
= create_user_qp(dev
, pd
, qp
, udata
, &in
, &resp
, &inlen
);
933 mlx5_ib_dbg(dev
, "err %d\n", err
);
935 err
= create_kernel_qp(dev
, init_attr
, qp
, &in
, &inlen
);
937 mlx5_ib_dbg(dev
, "err %d\n", err
);
939 qp
->pa_lkey
= to_mpd(pd
)->pa_lkey
;
945 in
= mlx5_vzalloc(sizeof(*in
));
949 qp
->create_type
= MLX5_QP_EMPTY
;
952 if (is_sqp(init_attr
->qp_type
))
953 qp
->port
= init_attr
->port_num
;
955 in
->ctx
.flags
= cpu_to_be32(to_mlx5_st(init_attr
->qp_type
) << 16 |
956 MLX5_QP_PM_MIGRATED
<< 11);
958 if (init_attr
->qp_type
!= MLX5_IB_QPT_REG_UMR
)
959 in
->ctx
.flags_pd
= cpu_to_be32(to_mpd(pd
? pd
: devr
->p0
)->pdn
);
961 in
->ctx
.flags_pd
= cpu_to_be32(MLX5_QP_LAT_SENSITIVE
);
964 in
->ctx
.flags_pd
|= cpu_to_be32(MLX5_QP_ENABLE_SIG
);
966 if (qp
->flags
& MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK
)
967 in
->ctx
.flags_pd
|= cpu_to_be32(MLX5_QP_BLOCK_MCAST
);
969 if (qp
->scat_cqe
&& is_connected(init_attr
->qp_type
)) {
973 rcqe_sz
= mlx5_ib_get_cqe_size(dev
, init_attr
->recv_cq
);
974 scqe_sz
= mlx5_ib_get_cqe_size(dev
, init_attr
->send_cq
);
977 in
->ctx
.cs_res
= MLX5_RES_SCAT_DATA64_CQE
;
979 in
->ctx
.cs_res
= MLX5_RES_SCAT_DATA32_CQE
;
981 if (init_attr
->sq_sig_type
== IB_SIGNAL_ALL_WR
) {
983 in
->ctx
.cs_req
= MLX5_REQ_SCAT_DATA64_CQE
;
985 in
->ctx
.cs_req
= MLX5_REQ_SCAT_DATA32_CQE
;
989 if (qp
->rq
.wqe_cnt
) {
990 in
->ctx
.rq_size_stride
= (qp
->rq
.wqe_shift
- 4);
991 in
->ctx
.rq_size_stride
|= ilog2(qp
->rq
.wqe_cnt
) << 3;
994 in
->ctx
.rq_type_srqn
= get_rx_type(qp
, init_attr
);
997 in
->ctx
.sq_crq_size
|= cpu_to_be16(ilog2(qp
->sq
.wqe_cnt
) << 11);
999 in
->ctx
.sq_crq_size
|= cpu_to_be16(0x8000);
1001 /* Set default resources */
1002 switch (init_attr
->qp_type
) {
1003 case IB_QPT_XRC_TGT
:
1004 in
->ctx
.cqn_recv
= cpu_to_be32(to_mcq(devr
->c0
)->mcq
.cqn
);
1005 in
->ctx
.cqn_send
= cpu_to_be32(to_mcq(devr
->c0
)->mcq
.cqn
);
1006 in
->ctx
.rq_type_srqn
|= cpu_to_be32(to_msrq(devr
->s0
)->msrq
.srqn
);
1007 in
->ctx
.xrcd
= cpu_to_be32(to_mxrcd(init_attr
->xrcd
)->xrcdn
);
1009 case IB_QPT_XRC_INI
:
1010 in
->ctx
.cqn_recv
= cpu_to_be32(to_mcq(devr
->c0
)->mcq
.cqn
);
1011 in
->ctx
.xrcd
= cpu_to_be32(to_mxrcd(devr
->x1
)->xrcdn
);
1012 in
->ctx
.rq_type_srqn
|= cpu_to_be32(to_msrq(devr
->s0
)->msrq
.srqn
);
1015 if (init_attr
->srq
) {
1016 in
->ctx
.xrcd
= cpu_to_be32(to_mxrcd(devr
->x0
)->xrcdn
);
1017 in
->ctx
.rq_type_srqn
|= cpu_to_be32(to_msrq(init_attr
->srq
)->msrq
.srqn
);
1019 in
->ctx
.xrcd
= cpu_to_be32(to_mxrcd(devr
->x1
)->xrcdn
);
1020 in
->ctx
.rq_type_srqn
|= cpu_to_be32(to_msrq(devr
->s0
)->msrq
.srqn
);
1024 if (init_attr
->send_cq
)
1025 in
->ctx
.cqn_send
= cpu_to_be32(to_mcq(init_attr
->send_cq
)->mcq
.cqn
);
1027 if (init_attr
->recv_cq
)
1028 in
->ctx
.cqn_recv
= cpu_to_be32(to_mcq(init_attr
->recv_cq
)->mcq
.cqn
);
1030 in
->ctx
.db_rec_addr
= cpu_to_be64(qp
->db
.dma
);
1032 err
= mlx5_core_create_qp(dev
->mdev
, &qp
->mqp
, in
, inlen
);
1034 mlx5_ib_dbg(dev
, "create qp failed\n");
1039 /* Hardware wants QPN written in big-endian order (after
1040 * shifting) for send doorbell. Precompute this value to save
1041 * a little bit when posting sends.
1043 qp
->doorbell_qpn
= swab32(qp
->mqp
.qpn
<< 8);
1045 qp
->mqp
.event
= mlx5_ib_qp_event
;
1050 if (qp
->create_type
== MLX5_QP_USER
)
1051 destroy_qp_user(pd
, qp
);
1052 else if (qp
->create_type
== MLX5_QP_KERNEL
)
1053 destroy_qp_kernel(dev
, qp
);
1059 static void mlx5_ib_lock_cqs(struct mlx5_ib_cq
*send_cq
, struct mlx5_ib_cq
*recv_cq
)
1060 __acquires(&send_cq
->lock
) __acquires(&recv_cq
->lock
)
1064 if (send_cq
->mcq
.cqn
< recv_cq
->mcq
.cqn
) {
1065 spin_lock_irq(&send_cq
->lock
);
1066 spin_lock_nested(&recv_cq
->lock
,
1067 SINGLE_DEPTH_NESTING
);
1068 } else if (send_cq
->mcq
.cqn
== recv_cq
->mcq
.cqn
) {
1069 spin_lock_irq(&send_cq
->lock
);
1070 __acquire(&recv_cq
->lock
);
1072 spin_lock_irq(&recv_cq
->lock
);
1073 spin_lock_nested(&send_cq
->lock
,
1074 SINGLE_DEPTH_NESTING
);
1077 spin_lock_irq(&send_cq
->lock
);
1078 __acquire(&recv_cq
->lock
);
1080 } else if (recv_cq
) {
1081 spin_lock_irq(&recv_cq
->lock
);
1082 __acquire(&send_cq
->lock
);
1084 __acquire(&send_cq
->lock
);
1085 __acquire(&recv_cq
->lock
);
1089 static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq
*send_cq
, struct mlx5_ib_cq
*recv_cq
)
1090 __releases(&send_cq
->lock
) __releases(&recv_cq
->lock
)
1094 if (send_cq
->mcq
.cqn
< recv_cq
->mcq
.cqn
) {
1095 spin_unlock(&recv_cq
->lock
);
1096 spin_unlock_irq(&send_cq
->lock
);
1097 } else if (send_cq
->mcq
.cqn
== recv_cq
->mcq
.cqn
) {
1098 __release(&recv_cq
->lock
);
1099 spin_unlock_irq(&send_cq
->lock
);
1101 spin_unlock(&send_cq
->lock
);
1102 spin_unlock_irq(&recv_cq
->lock
);
1105 __release(&recv_cq
->lock
);
1106 spin_unlock_irq(&send_cq
->lock
);
1108 } else if (recv_cq
) {
1109 __release(&send_cq
->lock
);
1110 spin_unlock_irq(&recv_cq
->lock
);
1112 __release(&recv_cq
->lock
);
1113 __release(&send_cq
->lock
);
1117 static struct mlx5_ib_pd
*get_pd(struct mlx5_ib_qp
*qp
)
1119 return to_mpd(qp
->ibqp
.pd
);
1122 static void get_cqs(struct mlx5_ib_qp
*qp
,
1123 struct mlx5_ib_cq
**send_cq
, struct mlx5_ib_cq
**recv_cq
)
1125 switch (qp
->ibqp
.qp_type
) {
1126 case IB_QPT_XRC_TGT
:
1130 case MLX5_IB_QPT_REG_UMR
:
1131 case IB_QPT_XRC_INI
:
1132 *send_cq
= to_mcq(qp
->ibqp
.send_cq
);
1141 case IB_QPT_RAW_IPV6
:
1142 case IB_QPT_RAW_ETHERTYPE
:
1143 *send_cq
= to_mcq(qp
->ibqp
.send_cq
);
1144 *recv_cq
= to_mcq(qp
->ibqp
.recv_cq
);
1147 case IB_QPT_RAW_PACKET
:
1156 static void destroy_qp_common(struct mlx5_ib_dev
*dev
, struct mlx5_ib_qp
*qp
)
1158 struct mlx5_ib_cq
*send_cq
, *recv_cq
;
1159 struct mlx5_modify_qp_mbox_in
*in
;
1162 in
= kzalloc(sizeof(*in
), GFP_KERNEL
);
1165 if (qp
->state
!= IB_QPS_RESET
) {
1166 mlx5_ib_qp_disable_pagefaults(qp
);
1167 if (mlx5_core_qp_modify(dev
->mdev
, to_mlx5_state(qp
->state
),
1168 MLX5_QP_STATE_RST
, in
, sizeof(*in
), &qp
->mqp
))
1169 mlx5_ib_warn(dev
, "mlx5_ib: modify QP %06x to RESET failed\n",
1173 get_cqs(qp
, &send_cq
, &recv_cq
);
1175 if (qp
->create_type
== MLX5_QP_KERNEL
) {
1176 mlx5_ib_lock_cqs(send_cq
, recv_cq
);
1177 __mlx5_ib_cq_clean(recv_cq
, qp
->mqp
.qpn
,
1178 qp
->ibqp
.srq
? to_msrq(qp
->ibqp
.srq
) : NULL
);
1179 if (send_cq
!= recv_cq
)
1180 __mlx5_ib_cq_clean(send_cq
, qp
->mqp
.qpn
, NULL
);
1181 mlx5_ib_unlock_cqs(send_cq
, recv_cq
);
1184 err
= mlx5_core_destroy_qp(dev
->mdev
, &qp
->mqp
);
1186 mlx5_ib_warn(dev
, "failed to destroy QP 0x%x\n", qp
->mqp
.qpn
);
1190 if (qp
->create_type
== MLX5_QP_KERNEL
)
1191 destroy_qp_kernel(dev
, qp
);
1192 else if (qp
->create_type
== MLX5_QP_USER
)
1193 destroy_qp_user(&get_pd(qp
)->ibpd
, qp
);
static const char *ib_qp_type_str(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_SMI:
		return "IB_QPT_SMI";
	case IB_QPT_GSI:
		return "IB_QPT_GSI";
	case IB_QPT_RC:
		return "IB_QPT_RC";
	case IB_QPT_UC:
		return "IB_QPT_UC";
	case IB_QPT_UD:
		return "IB_QPT_UD";
	case IB_QPT_RAW_IPV6:
		return "IB_QPT_RAW_IPV6";
	case IB_QPT_RAW_ETHERTYPE:
		return "IB_QPT_RAW_ETHERTYPE";
	case IB_QPT_XRC_INI:
		return "IB_QPT_XRC_INI";
	case IB_QPT_XRC_TGT:
		return "IB_QPT_XRC_TGT";
	case IB_QPT_RAW_PACKET:
		return "IB_QPT_RAW_PACKET";
	case MLX5_IB_QPT_REG_UMR:
		return "MLX5_IB_QPT_REG_UMR";
	default:
		return "Invalid QP type";
	}
}
1227 struct ib_qp
*mlx5_ib_create_qp(struct ib_pd
*pd
,
1228 struct ib_qp_init_attr
*init_attr
,
1229 struct ib_udata
*udata
)
1231 struct mlx5_general_caps
*gen
;
1232 struct mlx5_ib_dev
*dev
;
1233 struct mlx5_ib_qp
*qp
;
1238 dev
= to_mdev(pd
->device
);
1240 /* being cautious here */
1241 if (init_attr
->qp_type
!= IB_QPT_XRC_TGT
&&
1242 init_attr
->qp_type
!= MLX5_IB_QPT_REG_UMR
) {
1243 pr_warn("%s: no PD for transport %s\n", __func__
,
1244 ib_qp_type_str(init_attr
->qp_type
));
1245 return ERR_PTR(-EINVAL
);
1247 dev
= to_mdev(to_mxrcd(init_attr
->xrcd
)->ibxrcd
.device
);
1249 gen
= &dev
->mdev
->caps
.gen
;
1251 switch (init_attr
->qp_type
) {
1252 case IB_QPT_XRC_TGT
:
1253 case IB_QPT_XRC_INI
:
1254 if (!(gen
->flags
& MLX5_DEV_CAP_FLAG_XRC
)) {
1255 mlx5_ib_dbg(dev
, "XRC not supported\n");
1256 return ERR_PTR(-ENOSYS
);
1258 init_attr
->recv_cq
= NULL
;
1259 if (init_attr
->qp_type
== IB_QPT_XRC_TGT
) {
1260 xrcdn
= to_mxrcd(init_attr
->xrcd
)->xrcdn
;
1261 init_attr
->send_cq
= NULL
;
1270 case MLX5_IB_QPT_REG_UMR
:
1271 qp
= kzalloc(sizeof(*qp
), GFP_KERNEL
);
1273 return ERR_PTR(-ENOMEM
);
1275 err
= create_qp_common(dev
, pd
, init_attr
, udata
, qp
);
1277 mlx5_ib_dbg(dev
, "create_qp_common failed\n");
1279 return ERR_PTR(err
);
1282 if (is_qp0(init_attr
->qp_type
))
1283 qp
->ibqp
.qp_num
= 0;
1284 else if (is_qp1(init_attr
->qp_type
))
1285 qp
->ibqp
.qp_num
= 1;
1287 qp
->ibqp
.qp_num
= qp
->mqp
.qpn
;
1289 mlx5_ib_dbg(dev
, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
1290 qp
->ibqp
.qp_num
, qp
->mqp
.qpn
, to_mcq(init_attr
->recv_cq
)->mcq
.cqn
,
1291 to_mcq(init_attr
->send_cq
)->mcq
.cqn
);
1297 case IB_QPT_RAW_IPV6
:
1298 case IB_QPT_RAW_ETHERTYPE
:
1299 case IB_QPT_RAW_PACKET
:
1302 mlx5_ib_dbg(dev
, "unsupported qp type %d\n",
1303 init_attr
->qp_type
);
1304 /* Don't support raw QPs */
1305 return ERR_PTR(-EINVAL
);
1311 int mlx5_ib_destroy_qp(struct ib_qp
*qp
)
1313 struct mlx5_ib_dev
*dev
= to_mdev(qp
->device
);
1314 struct mlx5_ib_qp
*mqp
= to_mqp(qp
);
1316 destroy_qp_common(dev
, mqp
);
1323 static __be32
to_mlx5_access_flags(struct mlx5_ib_qp
*qp
, const struct ib_qp_attr
*attr
,
1326 u32 hw_access_flags
= 0;
1330 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
)
1331 dest_rd_atomic
= attr
->max_dest_rd_atomic
;
1333 dest_rd_atomic
= qp
->resp_depth
;
1335 if (attr_mask
& IB_QP_ACCESS_FLAGS
)
1336 access_flags
= attr
->qp_access_flags
;
1338 access_flags
= qp
->atomic_rd_en
;
1340 if (!dest_rd_atomic
)
1341 access_flags
&= IB_ACCESS_REMOTE_WRITE
;
1343 if (access_flags
& IB_ACCESS_REMOTE_READ
)
1344 hw_access_flags
|= MLX5_QP_BIT_RRE
;
1345 if (access_flags
& IB_ACCESS_REMOTE_ATOMIC
)
1346 hw_access_flags
|= (MLX5_QP_BIT_RAE
| MLX5_ATOMIC_MODE_CX
);
1347 if (access_flags
& IB_ACCESS_REMOTE_WRITE
)
1348 hw_access_flags
|= MLX5_QP_BIT_RWE
;
1350 return cpu_to_be32(hw_access_flags
);
1354 MLX5_PATH_FLAG_FL
= 1 << 0,
1355 MLX5_PATH_FLAG_FREE_AR
= 1 << 1,
1356 MLX5_PATH_FLAG_COUNTER
= 1 << 2,
1359 static int ib_rate_to_mlx5(struct mlx5_ib_dev
*dev
, u8 rate
)
1361 struct mlx5_general_caps
*gen
;
1363 gen
= &dev
->mdev
->caps
.gen
;
1364 if (rate
== IB_RATE_PORT_CURRENT
) {
1366 } else if (rate
< IB_RATE_2_5_GBPS
|| rate
> IB_RATE_300_GBPS
) {
1369 while (rate
!= IB_RATE_2_5_GBPS
&&
1370 !(1 << (rate
+ MLX5_STAT_RATE_OFFSET
) &
1371 gen
->stat_rate_support
))
1375 return rate
+ MLX5_STAT_RATE_OFFSET
;
1378 static int mlx5_set_path(struct mlx5_ib_dev
*dev
, const struct ib_ah_attr
*ah
,
1379 struct mlx5_qp_path
*path
, u8 port
, int attr_mask
,
1380 u32 path_flags
, const struct ib_qp_attr
*attr
)
1382 struct mlx5_general_caps
*gen
;
1385 gen
= &dev
->mdev
->caps
.gen
;
1386 path
->fl
= (path_flags
& MLX5_PATH_FLAG_FL
) ? 0x80 : 0;
1387 path
->free_ar
= (path_flags
& MLX5_PATH_FLAG_FREE_AR
) ? 0x80 : 0;
1389 if (attr_mask
& IB_QP_PKEY_INDEX
)
1390 path
->pkey_index
= attr
->pkey_index
;
1392 path
->grh_mlid
= ah
->src_path_bits
& 0x7f;
1393 path
->rlid
= cpu_to_be16(ah
->dlid
);
1395 if (ah
->ah_flags
& IB_AH_GRH
) {
1396 if (ah
->grh
.sgid_index
>= gen
->port
[port
- 1].gid_table_len
) {
1397 pr_err(KERN_ERR
"sgid_index (%u) too large. max is %d\n",
1398 ah
->grh
.sgid_index
, gen
->port
[port
- 1].gid_table_len
);
1401 path
->grh_mlid
|= 1 << 7;
1402 path
->mgid_index
= ah
->grh
.sgid_index
;
1403 path
->hop_limit
= ah
->grh
.hop_limit
;
1404 path
->tclass_flowlabel
=
1405 cpu_to_be32((ah
->grh
.traffic_class
<< 20) |
1406 (ah
->grh
.flow_label
));
1407 memcpy(path
->rgid
, ah
->grh
.dgid
.raw
, 16);
1410 err
= ib_rate_to_mlx5(dev
, ah
->static_rate
);
1413 path
->static_rate
= err
;
1416 if (attr_mask
& IB_QP_TIMEOUT
)
1417 path
->ackto_lt
= attr
->timeout
<< 3;
1419 path
->sl
= ah
->sl
& 0xf;
1424 static enum mlx5_qp_optpar opt_mask
[MLX5_QP_NUM_STATE
][MLX5_QP_NUM_STATE
][MLX5_QP_ST_MAX
] = {
1425 [MLX5_QP_STATE_INIT
] = {
1426 [MLX5_QP_STATE_INIT
] = {
1427 [MLX5_QP_ST_RC
] = MLX5_QP_OPTPAR_RRE
|
1428 MLX5_QP_OPTPAR_RAE
|
1429 MLX5_QP_OPTPAR_RWE
|
1430 MLX5_QP_OPTPAR_PKEY_INDEX
|
1431 MLX5_QP_OPTPAR_PRI_PORT
,
1432 [MLX5_QP_ST_UC
] = MLX5_QP_OPTPAR_RWE
|
1433 MLX5_QP_OPTPAR_PKEY_INDEX
|
1434 MLX5_QP_OPTPAR_PRI_PORT
,
1435 [MLX5_QP_ST_UD
] = MLX5_QP_OPTPAR_PKEY_INDEX
|
1436 MLX5_QP_OPTPAR_Q_KEY
|
1437 MLX5_QP_OPTPAR_PRI_PORT
,
1439 [MLX5_QP_STATE_RTR
] = {
1440 [MLX5_QP_ST_RC
] = MLX5_QP_OPTPAR_ALT_ADDR_PATH
|
1441 MLX5_QP_OPTPAR_RRE
|
1442 MLX5_QP_OPTPAR_RAE
|
1443 MLX5_QP_OPTPAR_RWE
|
1444 MLX5_QP_OPTPAR_PKEY_INDEX
,
1445 [MLX5_QP_ST_UC
] = MLX5_QP_OPTPAR_ALT_ADDR_PATH
|
1446 MLX5_QP_OPTPAR_RWE
|
1447 MLX5_QP_OPTPAR_PKEY_INDEX
,
1448 [MLX5_QP_ST_UD
] = MLX5_QP_OPTPAR_PKEY_INDEX
|
1449 MLX5_QP_OPTPAR_Q_KEY
,
1450 [MLX5_QP_ST_MLX
] = MLX5_QP_OPTPAR_PKEY_INDEX
|
1451 MLX5_QP_OPTPAR_Q_KEY
,
1452 [MLX5_QP_ST_XRC
] = MLX5_QP_OPTPAR_ALT_ADDR_PATH
|
1453 MLX5_QP_OPTPAR_RRE
|
1454 MLX5_QP_OPTPAR_RAE
|
1455 MLX5_QP_OPTPAR_RWE
|
1456 MLX5_QP_OPTPAR_PKEY_INDEX
,
1459 [MLX5_QP_STATE_RTR
] = {
1460 [MLX5_QP_STATE_RTS
] = {
1461 [MLX5_QP_ST_RC
] = MLX5_QP_OPTPAR_ALT_ADDR_PATH
|
1462 MLX5_QP_OPTPAR_RRE
|
1463 MLX5_QP_OPTPAR_RAE
|
1464 MLX5_QP_OPTPAR_RWE
|
1465 MLX5_QP_OPTPAR_PM_STATE
|
1466 MLX5_QP_OPTPAR_RNR_TIMEOUT
,
1467 [MLX5_QP_ST_UC
] = MLX5_QP_OPTPAR_ALT_ADDR_PATH
|
1468 MLX5_QP_OPTPAR_RWE
|
1469 MLX5_QP_OPTPAR_PM_STATE
,
1470 [MLX5_QP_ST_UD
] = MLX5_QP_OPTPAR_Q_KEY
,
1473 [MLX5_QP_STATE_RTS
] = {
1474 [MLX5_QP_STATE_RTS
] = {
1475 [MLX5_QP_ST_RC
] = MLX5_QP_OPTPAR_RRE
|
1476 MLX5_QP_OPTPAR_RAE
|
1477 MLX5_QP_OPTPAR_RWE
|
1478 MLX5_QP_OPTPAR_RNR_TIMEOUT
|
1479 MLX5_QP_OPTPAR_PM_STATE
|
1480 MLX5_QP_OPTPAR_ALT_ADDR_PATH
,
1481 [MLX5_QP_ST_UC
] = MLX5_QP_OPTPAR_RWE
|
1482 MLX5_QP_OPTPAR_PM_STATE
|
1483 MLX5_QP_OPTPAR_ALT_ADDR_PATH
,
1484 [MLX5_QP_ST_UD
] = MLX5_QP_OPTPAR_Q_KEY
|
1485 MLX5_QP_OPTPAR_SRQN
|
1486 MLX5_QP_OPTPAR_CQN_RCV
,
1489 [MLX5_QP_STATE_SQER
] = {
1490 [MLX5_QP_STATE_RTS
] = {
1491 [MLX5_QP_ST_UD
] = MLX5_QP_OPTPAR_Q_KEY
,
1492 [MLX5_QP_ST_MLX
] = MLX5_QP_OPTPAR_Q_KEY
,
1493 [MLX5_QP_ST_UC
] = MLX5_QP_OPTPAR_RWE
,
1494 [MLX5_QP_ST_RC
] = MLX5_QP_OPTPAR_RNR_TIMEOUT
|
1495 MLX5_QP_OPTPAR_RWE
|
1496 MLX5_QP_OPTPAR_RAE
|
1502 static int ib_nr_to_mlx5_nr(int ib_mask
)
1507 case IB_QP_CUR_STATE
:
1509 case IB_QP_EN_SQD_ASYNC_NOTIFY
:
1511 case IB_QP_ACCESS_FLAGS
:
1512 return MLX5_QP_OPTPAR_RWE
| MLX5_QP_OPTPAR_RRE
|
1514 case IB_QP_PKEY_INDEX
:
1515 return MLX5_QP_OPTPAR_PKEY_INDEX
;
1517 return MLX5_QP_OPTPAR_PRI_PORT
;
1519 return MLX5_QP_OPTPAR_Q_KEY
;
1521 return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH
|
1522 MLX5_QP_OPTPAR_PRI_PORT
;
1523 case IB_QP_PATH_MTU
:
1526 return MLX5_QP_OPTPAR_ACK_TIMEOUT
;
1527 case IB_QP_RETRY_CNT
:
1528 return MLX5_QP_OPTPAR_RETRY_COUNT
;
1529 case IB_QP_RNR_RETRY
:
1530 return MLX5_QP_OPTPAR_RNR_RETRY
;
1533 case IB_QP_MAX_QP_RD_ATOMIC
:
1534 return MLX5_QP_OPTPAR_SRA_MAX
;
1535 case IB_QP_ALT_PATH
:
1536 return MLX5_QP_OPTPAR_ALT_ADDR_PATH
;
1537 case IB_QP_MIN_RNR_TIMER
:
1538 return MLX5_QP_OPTPAR_RNR_TIMEOUT
;
1541 case IB_QP_MAX_DEST_RD_ATOMIC
:
1542 return MLX5_QP_OPTPAR_RRA_MAX
| MLX5_QP_OPTPAR_RWE
|
1543 MLX5_QP_OPTPAR_RRE
| MLX5_QP_OPTPAR_RAE
;
1544 case IB_QP_PATH_MIG_STATE
:
1545 return MLX5_QP_OPTPAR_PM_STATE
;
1548 case IB_QP_DEST_QPN
:
1554 static int ib_mask_to_mlx5_opt(int ib_mask
)
1559 for (i
= 0; i
< 8 * sizeof(int); i
++) {
1560 if ((1 << i
) & ib_mask
)
1561 result
|= ib_nr_to_mlx5_nr(1 << i
);
1567 static int __mlx5_ib_modify_qp(struct ib_qp
*ibqp
,
1568 const struct ib_qp_attr
*attr
, int attr_mask
,
1569 enum ib_qp_state cur_state
, enum ib_qp_state new_state
)
1571 struct mlx5_ib_dev
*dev
= to_mdev(ibqp
->device
);
1572 struct mlx5_ib_qp
*qp
= to_mqp(ibqp
);
1573 struct mlx5_ib_cq
*send_cq
, *recv_cq
;
1574 struct mlx5_qp_context
*context
;
1575 struct mlx5_general_caps
*gen
;
1576 struct mlx5_modify_qp_mbox_in
*in
;
1577 struct mlx5_ib_pd
*pd
;
1578 enum mlx5_qp_state mlx5_cur
, mlx5_new
;
1579 enum mlx5_qp_optpar optpar
;
1584 gen
= &dev
->mdev
->caps
.gen
;
1585 in
= kzalloc(sizeof(*in
), GFP_KERNEL
);
1590 err
= to_mlx5_st(ibqp
->qp_type
);
1594 context
->flags
= cpu_to_be32(err
<< 16);
1596 if (!(attr_mask
& IB_QP_PATH_MIG_STATE
)) {
1597 context
->flags
|= cpu_to_be32(MLX5_QP_PM_MIGRATED
<< 11);
1599 switch (attr
->path_mig_state
) {
1600 case IB_MIG_MIGRATED
:
1601 context
->flags
|= cpu_to_be32(MLX5_QP_PM_MIGRATED
<< 11);
1604 context
->flags
|= cpu_to_be32(MLX5_QP_PM_REARM
<< 11);
1607 context
->flags
|= cpu_to_be32(MLX5_QP_PM_ARMED
<< 11);
1612 if (ibqp
->qp_type
== IB_QPT_GSI
|| ibqp
->qp_type
== IB_QPT_SMI
) {
1613 context
->mtu_msgmax
= (IB_MTU_256
<< 5) | 8;
1614 } else if (ibqp
->qp_type
== IB_QPT_UD
||
1615 ibqp
->qp_type
== MLX5_IB_QPT_REG_UMR
) {
1616 context
->mtu_msgmax
= (IB_MTU_4096
<< 5) | 12;
1617 } else if (attr_mask
& IB_QP_PATH_MTU
) {
1618 if (attr
->path_mtu
< IB_MTU_256
||
1619 attr
->path_mtu
> IB_MTU_4096
) {
1620 mlx5_ib_warn(dev
, "invalid mtu %d\n", attr
->path_mtu
);
1624 context
->mtu_msgmax
= (attr
->path_mtu
<< 5) | gen
->log_max_msg
;
1627 if (attr_mask
& IB_QP_DEST_QPN
)
1628 context
->log_pg_sz_remote_qpn
= cpu_to_be32(attr
->dest_qp_num
);
1630 if (attr_mask
& IB_QP_PKEY_INDEX
)
1631 context
->pri_path
.pkey_index
= attr
->pkey_index
;
1633 /* todo implement counter_index functionality */
1635 if (is_sqp(ibqp
->qp_type
))
1636 context
->pri_path
.port
= qp
->port
;
1638 if (attr_mask
& IB_QP_PORT
)
1639 context
->pri_path
.port
= attr
->port_num
;
1641 if (attr_mask
& IB_QP_AV
) {
1642 err
= mlx5_set_path(dev
, &attr
->ah_attr
, &context
->pri_path
,
1643 attr_mask
& IB_QP_PORT
? attr
->port_num
: qp
->port
,
1644 attr_mask
, 0, attr
);
1649 if (attr_mask
& IB_QP_TIMEOUT
)
1650 context
->pri_path
.ackto_lt
|= attr
->timeout
<< 3;
1652 if (attr_mask
& IB_QP_ALT_PATH
) {
1653 err
= mlx5_set_path(dev
, &attr
->alt_ah_attr
, &context
->alt_path
,
1654 attr
->alt_port_num
, attr_mask
, 0, attr
);
1660 get_cqs(qp
, &send_cq
, &recv_cq
);
1662 context
->flags_pd
= cpu_to_be32(pd
? pd
->pdn
: to_mpd(dev
->devr
.p0
)->pdn
);
1663 context
->cqn_send
= send_cq
? cpu_to_be32(send_cq
->mcq
.cqn
) : 0;
1664 context
->cqn_recv
= recv_cq
? cpu_to_be32(recv_cq
->mcq
.cqn
) : 0;
1665 context
->params1
= cpu_to_be32(MLX5_IB_ACK_REQ_FREQ
<< 28);
1667 if (attr_mask
& IB_QP_RNR_RETRY
)
1668 context
->params1
|= cpu_to_be32(attr
->rnr_retry
<< 13);
1670 if (attr_mask
& IB_QP_RETRY_CNT
)
1671 context
->params1
|= cpu_to_be32(attr
->retry_cnt
<< 16);
1673 if (attr_mask
& IB_QP_MAX_QP_RD_ATOMIC
) {
1674 if (attr
->max_rd_atomic
)
1676 cpu_to_be32(fls(attr
->max_rd_atomic
- 1) << 21);
1679 if (attr_mask
& IB_QP_SQ_PSN
)
1680 context
->next_send_psn
= cpu_to_be32(attr
->sq_psn
);
1682 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
) {
1683 if (attr
->max_dest_rd_atomic
)
1685 cpu_to_be32(fls(attr
->max_dest_rd_atomic
- 1) << 21);
1688 if (attr_mask
& (IB_QP_ACCESS_FLAGS
| IB_QP_MAX_DEST_RD_ATOMIC
))
1689 context
->params2
|= to_mlx5_access_flags(qp
, attr
, attr_mask
);
1691 if (attr_mask
& IB_QP_MIN_RNR_TIMER
)
1692 context
->rnr_nextrecvpsn
|= cpu_to_be32(attr
->min_rnr_timer
<< 24);
1694 if (attr_mask
& IB_QP_RQ_PSN
)
1695 context
->rnr_nextrecvpsn
|= cpu_to_be32(attr
->rq_psn
);
1697 if (attr_mask
& IB_QP_QKEY
)
1698 context
->qkey
= cpu_to_be32(attr
->qkey
);
1700 if (qp
->rq
.wqe_cnt
&& cur_state
== IB_QPS_RESET
&& new_state
== IB_QPS_INIT
)
1701 context
->db_rec_addr
= cpu_to_be64(qp
->db
.dma
);
1703 if (cur_state
== IB_QPS_RTS
&& new_state
== IB_QPS_SQD
&&
1704 attr_mask
& IB_QP_EN_SQD_ASYNC_NOTIFY
&& attr
->en_sqd_async_notify
)
1709 if (!ibqp
->uobject
&& cur_state
== IB_QPS_RESET
&& new_state
== IB_QPS_INIT
)
1710 context
->sq_crq_size
|= cpu_to_be16(1 << 4);
1713 mlx5_cur
= to_mlx5_state(cur_state
);
1714 mlx5_new
= to_mlx5_state(new_state
);
1715 mlx5_st
= to_mlx5_st(ibqp
->qp_type
);
1719 /* If moving to a reset or error state, we must disable page faults on
1720 * this QP and flush all current page faults. Otherwise a stale page
1721 * fault may attempt to work on this QP after it is reset and moved
1722 * again to RTS, and may cause the driver and the device to get out of
1724 if (cur_state
!= IB_QPS_RESET
&& cur_state
!= IB_QPS_ERR
&&
1725 (new_state
== IB_QPS_RESET
|| new_state
== IB_QPS_ERR
))
1726 mlx5_ib_qp_disable_pagefaults(qp
);
1728 optpar
= ib_mask_to_mlx5_opt(attr_mask
);
1729 optpar
&= opt_mask
[mlx5_cur
][mlx5_new
][mlx5_st
];
1730 in
->optparam
= cpu_to_be32(optpar
);
1731 err
= mlx5_core_qp_modify(dev
->mdev
, to_mlx5_state(cur_state
),
1732 to_mlx5_state(new_state
), in
, sqd_event
,
1737 if (cur_state
== IB_QPS_RESET
&& new_state
== IB_QPS_INIT
)
1738 mlx5_ib_qp_enable_pagefaults(qp
);
1740 qp
->state
= new_state
;
1742 if (attr_mask
& IB_QP_ACCESS_FLAGS
)
1743 qp
->atomic_rd_en
= attr
->qp_access_flags
;
1744 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
)
1745 qp
->resp_depth
= attr
->max_dest_rd_atomic
;
1746 if (attr_mask
& IB_QP_PORT
)
1747 qp
->port
= attr
->port_num
;
1748 if (attr_mask
& IB_QP_ALT_PATH
)
1749 qp
->alt_port
= attr
->alt_port_num
;
1752 * If we moved a kernel QP to RESET, clean up all old CQ
1753 * entries and reinitialize the QP.
1755 if (new_state
== IB_QPS_RESET
&& !ibqp
->uobject
) {
1756 mlx5_ib_cq_clean(recv_cq
, qp
->mqp
.qpn
,
1757 ibqp
->srq
? to_msrq(ibqp
->srq
) : NULL
);
1758 if (send_cq
!= recv_cq
)
1759 mlx5_ib_cq_clean(send_cq
, qp
->mqp
.qpn
, NULL
);
1765 qp
->sq
.cur_post
= 0;
1766 qp
->sq
.last_poll
= 0;
1767 qp
->db
.db
[MLX5_RCV_DBR
] = 0;
1768 qp
->db
.db
[MLX5_SND_DBR
] = 0;
1776 int mlx5_ib_modify_qp(struct ib_qp
*ibqp
, struct ib_qp_attr
*attr
,
1777 int attr_mask
, struct ib_udata
*udata
)
1779 struct mlx5_ib_dev
*dev
= to_mdev(ibqp
->device
);
1780 struct mlx5_ib_qp
*qp
= to_mqp(ibqp
);
1781 enum ib_qp_state cur_state
, new_state
;
1782 struct mlx5_general_caps
*gen
;
1786 gen
= &dev
->mdev
->caps
.gen
;
1787 mutex_lock(&qp
->mutex
);
1789 cur_state
= attr_mask
& IB_QP_CUR_STATE
? attr
->cur_qp_state
: qp
->state
;
1790 new_state
= attr_mask
& IB_QP_STATE
? attr
->qp_state
: cur_state
;
1792 if (ibqp
->qp_type
!= MLX5_IB_QPT_REG_UMR
&&
1793 !ib_modify_qp_is_ok(cur_state
, new_state
, ibqp
->qp_type
, attr_mask
,
1794 IB_LINK_LAYER_UNSPECIFIED
))
1797 if ((attr_mask
& IB_QP_PORT
) &&
1798 (attr
->port_num
== 0 || attr
->port_num
> gen
->num_ports
))
1801 if (attr_mask
& IB_QP_PKEY_INDEX
) {
1802 port
= attr_mask
& IB_QP_PORT
? attr
->port_num
: qp
->port
;
1803 if (attr
->pkey_index
>= gen
->port
[port
- 1].pkey_table_len
)
1807 if (attr_mask
& IB_QP_MAX_QP_RD_ATOMIC
&&
1808 attr
->max_rd_atomic
> (1 << gen
->log_max_ra_res_qp
))
1811 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
&&
1812 attr
->max_dest_rd_atomic
> (1 << gen
->log_max_ra_req_qp
))
1815 if (cur_state
== new_state
&& cur_state
== IB_QPS_RESET
) {
1820 err
= __mlx5_ib_modify_qp(ibqp
, attr
, attr_mask
, cur_state
, new_state
);
1823 mutex_unlock(&qp
->mutex
);
1827 static int mlx5_wq_overflow(struct mlx5_ib_wq
*wq
, int nreq
, struct ib_cq
*ib_cq
)
1829 struct mlx5_ib_cq
*cq
;
1832 cur
= wq
->head
- wq
->tail
;
1833 if (likely(cur
+ nreq
< wq
->max_post
))
1837 spin_lock(&cq
->lock
);
1838 cur
= wq
->head
- wq
->tail
;
1839 spin_unlock(&cq
->lock
);
1841 return cur
+ nreq
>= wq
->max_post
;
static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->raddr    = cpu_to_be64(remote_addr);
	rseg->rkey     = cpu_to_be32(rkey);
	rseg->reserved = 0;
}

static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
			     struct ib_send_wr *wr)
{
	memcpy(&dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof(struct mlx5_av));
	dseg->av.dqp_dct = cpu_to_be32(wr->wr.ud.remote_qpn | MLX5_EXTENDED_UD_AV);
	dseg->av.key.qkey.qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
}

static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->byte_count = cpu_to_be32(sg->length);
	dseg->lkey       = cpu_to_be32(sg->lkey);
	dseg->addr       = cpu_to_be64(sg->addr);
}

static __be16 get_klm_octo(int npages)
{
	return cpu_to_be16(ALIGN(npages, 8) / 2);
}
1872 static __be64
frwr_mkey_mask(void)
1876 result
= MLX5_MKEY_MASK_LEN
|
1877 MLX5_MKEY_MASK_PAGE_SIZE
|
1878 MLX5_MKEY_MASK_START_ADDR
|
1879 MLX5_MKEY_MASK_EN_RINVAL
|
1880 MLX5_MKEY_MASK_KEY
|
1886 MLX5_MKEY_MASK_SMALL_FENCE
|
1887 MLX5_MKEY_MASK_FREE
;
1889 return cpu_to_be64(result
);
1892 static __be64
sig_mkey_mask(void)
1896 result
= MLX5_MKEY_MASK_LEN
|
1897 MLX5_MKEY_MASK_PAGE_SIZE
|
1898 MLX5_MKEY_MASK_START_ADDR
|
1899 MLX5_MKEY_MASK_EN_SIGERR
|
1900 MLX5_MKEY_MASK_EN_RINVAL
|
1901 MLX5_MKEY_MASK_KEY
|
1906 MLX5_MKEY_MASK_SMALL_FENCE
|
1907 MLX5_MKEY_MASK_FREE
|
1908 MLX5_MKEY_MASK_BSF_EN
;
1910 return cpu_to_be64(result
);
1913 static void set_frwr_umr_segment(struct mlx5_wqe_umr_ctrl_seg
*umr
,
1914 struct ib_send_wr
*wr
, int li
)
1916 memset(umr
, 0, sizeof(*umr
));
1919 umr
->mkey_mask
= cpu_to_be64(MLX5_MKEY_MASK_FREE
);
1920 umr
->flags
= 1 << 7;
1924 umr
->flags
= (1 << 5); /* fail if not free */
1925 umr
->klm_octowords
= get_klm_octo(wr
->wr
.fast_reg
.page_list_len
);
1926 umr
->mkey_mask
= frwr_mkey_mask();
1929 static __be64
get_umr_reg_mr_mask(void)
1933 result
= MLX5_MKEY_MASK_LEN
|
1934 MLX5_MKEY_MASK_PAGE_SIZE
|
1935 MLX5_MKEY_MASK_START_ADDR
|
1939 MLX5_MKEY_MASK_KEY
|
1943 MLX5_MKEY_MASK_FREE
;
1945 return cpu_to_be64(result
);
1948 static __be64
get_umr_unreg_mr_mask(void)
1952 result
= MLX5_MKEY_MASK_FREE
;
1954 return cpu_to_be64(result
);
1957 static __be64
get_umr_update_mtt_mask(void)
1961 result
= MLX5_MKEY_MASK_FREE
;
1963 return cpu_to_be64(result
);
1966 static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg
*umr
,
1967 struct ib_send_wr
*wr
)
1969 struct mlx5_umr_wr
*umrwr
= (struct mlx5_umr_wr
*)&wr
->wr
.fast_reg
;
1971 memset(umr
, 0, sizeof(*umr
));
1973 if (wr
->send_flags
& MLX5_IB_SEND_UMR_FAIL_IF_FREE
)
1974 umr
->flags
= MLX5_UMR_CHECK_FREE
; /* fail if free */
1976 umr
->flags
= MLX5_UMR_CHECK_NOT_FREE
; /* fail if not free */
1978 if (!(wr
->send_flags
& MLX5_IB_SEND_UMR_UNREG
)) {
1979 umr
->klm_octowords
= get_klm_octo(umrwr
->npages
);
1980 if (wr
->send_flags
& MLX5_IB_SEND_UMR_UPDATE_MTT
) {
1981 umr
->mkey_mask
= get_umr_update_mtt_mask();
1982 umr
->bsf_octowords
= get_klm_octo(umrwr
->target
.offset
);
1983 umr
->flags
|= MLX5_UMR_TRANSLATION_OFFSET_EN
;
1985 umr
->mkey_mask
= get_umr_reg_mr_mask();
1988 umr
->mkey_mask
= get_umr_unreg_mr_mask();
1992 umr
->flags
|= MLX5_UMR_INLINE
;
1995 static u8
get_umr_flags(int acc
)
1997 return (acc
& IB_ACCESS_REMOTE_ATOMIC
? MLX5_PERM_ATOMIC
: 0) |
1998 (acc
& IB_ACCESS_REMOTE_WRITE
? MLX5_PERM_REMOTE_WRITE
: 0) |
1999 (acc
& IB_ACCESS_REMOTE_READ
? MLX5_PERM_REMOTE_READ
: 0) |
2000 (acc
& IB_ACCESS_LOCAL_WRITE
? MLX5_PERM_LOCAL_WRITE
: 0) |
2001 MLX5_PERM_LOCAL_READ
| MLX5_PERM_UMR_EN
;
2004 static void set_mkey_segment(struct mlx5_mkey_seg
*seg
, struct ib_send_wr
*wr
,
2007 memset(seg
, 0, sizeof(*seg
));
2009 seg
->status
= MLX5_MKEY_STATUS_FREE
;
2013 seg
->flags
= get_umr_flags(wr
->wr
.fast_reg
.access_flags
) |
2014 MLX5_ACCESS_MODE_MTT
;
2015 *writ
= seg
->flags
& (MLX5_PERM_LOCAL_WRITE
| IB_ACCESS_REMOTE_WRITE
);
2016 seg
->qpn_mkey7_0
= cpu_to_be32((wr
->wr
.fast_reg
.rkey
& 0xff) | 0xffffff00);
2017 seg
->flags_pd
= cpu_to_be32(MLX5_MKEY_REMOTE_INVAL
);
2018 seg
->start_addr
= cpu_to_be64(wr
->wr
.fast_reg
.iova_start
);
2019 seg
->len
= cpu_to_be64(wr
->wr
.fast_reg
.length
);
2020 seg
->xlt_oct_size
= cpu_to_be32((wr
->wr
.fast_reg
.page_list_len
+ 1) / 2);
2021 seg
->log2_page_size
= wr
->wr
.fast_reg
.page_shift
;
2024 static void set_reg_mkey_segment(struct mlx5_mkey_seg
*seg
, struct ib_send_wr
*wr
)
2026 struct mlx5_umr_wr
*umrwr
= (struct mlx5_umr_wr
*)&wr
->wr
.fast_reg
;
2028 memset(seg
, 0, sizeof(*seg
));
2029 if (wr
->send_flags
& MLX5_IB_SEND_UMR_UNREG
) {
2030 seg
->status
= MLX5_MKEY_STATUS_FREE
;
2034 seg
->flags
= convert_access(umrwr
->access_flags
);
2035 if (!(wr
->send_flags
& MLX5_IB_SEND_UMR_UPDATE_MTT
)) {
2036 seg
->flags_pd
= cpu_to_be32(to_mpd(umrwr
->pd
)->pdn
);
2037 seg
->start_addr
= cpu_to_be64(umrwr
->target
.virt_addr
);
2039 seg
->len
= cpu_to_be64(umrwr
->length
);
2040 seg
->log2_page_size
= umrwr
->page_shift
;
2041 seg
->qpn_mkey7_0
= cpu_to_be32(0xffffff00 |
2042 mlx5_mkey_variant(umrwr
->mkey
));
2045 static void set_frwr_pages(struct mlx5_wqe_data_seg
*dseg
,
2046 struct ib_send_wr
*wr
,
2047 struct mlx5_core_dev
*mdev
,
2048 struct mlx5_ib_pd
*pd
,
2051 struct mlx5_ib_fast_reg_page_list
*mfrpl
= to_mfrpl(wr
->wr
.fast_reg
.page_list
);
2052 u64
*page_list
= wr
->wr
.fast_reg
.page_list
->page_list
;
2053 u64 perm
= MLX5_EN_RD
| (writ
? MLX5_EN_WR
: 0);
2056 for (i
= 0; i
< wr
->wr
.fast_reg
.page_list_len
; i
++)
2057 mfrpl
->mapped_page_list
[i
] = cpu_to_be64(page_list
[i
] | perm
);
2058 dseg
->addr
= cpu_to_be64(mfrpl
->map
);
2059 dseg
->byte_count
= cpu_to_be32(ALIGN(sizeof(u64
) * wr
->wr
.fast_reg
.page_list_len
, 64));
2060 dseg
->lkey
= cpu_to_be32(pd
->pa_lkey
);
2063 static __be32
send_ieth(struct ib_send_wr
*wr
)
2065 switch (wr
->opcode
) {
2066 case IB_WR_SEND_WITH_IMM
:
2067 case IB_WR_RDMA_WRITE_WITH_IMM
:
2068 return wr
->ex
.imm_data
;
2070 case IB_WR_SEND_WITH_INV
:
2071 return cpu_to_be32(wr
->ex
.invalidate_rkey
);
static u8 calc_sig(void *wqe, int size)
{
	u8 *p = wqe;
	u8 res = 0;
	int i;

	for (i = 0; i < size; i++)
		res ^= p[i];

	return ~res;
}

static u8 wq_sig(void *wqe)
{
	return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
}
2095 static int set_data_inl_seg(struct mlx5_ib_qp
*qp
, struct ib_send_wr
*wr
,
2098 struct mlx5_wqe_inline_seg
*seg
;
2099 void *qend
= qp
->sq
.qend
;
2107 wqe
+= sizeof(*seg
);
2108 for (i
= 0; i
< wr
->num_sge
; i
++) {
2109 addr
= (void *)(unsigned long)(wr
->sg_list
[i
].addr
);
2110 len
= wr
->sg_list
[i
].length
;
2113 if (unlikely(inl
> qp
->max_inline_data
))
2116 if (unlikely(wqe
+ len
> qend
)) {
2118 memcpy(wqe
, addr
, copy
);
2121 wqe
= mlx5_get_send_wqe(qp
, 0);
2123 memcpy(wqe
, addr
, len
);
2127 seg
->byte_count
= cpu_to_be32(inl
| MLX5_INLINE_SEG
);
2129 *sz
= ALIGN(inl
+ sizeof(seg
->byte_count
), 16) / 16;
static u16 prot_field_size(enum ib_signature_type type)
{
	switch (type) {
	case IB_SIG_TYPE_T10_DIF:
		return MLX5_DIF_SIZE;
	default:
		return 0;
	}
}

static u8 bs_selector(int block_size)
{
	switch (block_size) {
	case 512:	    return 0x1;
	case 520:	    return 0x2;
	case 4096:	    return 0x3;
	case 4160:	    return 0x4;
	case 1073741824:    return 0x5;
	default:	    return 0;
	}
}
2156 static void mlx5_fill_inl_bsf(struct ib_sig_domain
*domain
,
2157 struct mlx5_bsf_inl
*inl
)
2159 /* Valid inline section and allow BSF refresh */
2160 inl
->vld_refresh
= cpu_to_be16(MLX5_BSF_INL_VALID
|
2161 MLX5_BSF_REFRESH_DIF
);
2162 inl
->dif_apptag
= cpu_to_be16(domain
->sig
.dif
.app_tag
);
2163 inl
->dif_reftag
= cpu_to_be32(domain
->sig
.dif
.ref_tag
);
2164 /* repeating block */
2165 inl
->rp_inv_seed
= MLX5_BSF_REPEAT_BLOCK
;
2166 inl
->sig_type
= domain
->sig
.dif
.bg_type
== IB_T10DIF_CRC
?
2167 MLX5_DIF_CRC
: MLX5_DIF_IPCS
;
2169 if (domain
->sig
.dif
.ref_remap
)
2170 inl
->dif_inc_ref_guard_check
|= MLX5_BSF_INC_REFTAG
;
2172 if (domain
->sig
.dif
.app_escape
) {
2173 if (domain
->sig
.dif
.ref_escape
)
2174 inl
->dif_inc_ref_guard_check
|= MLX5_BSF_APPREF_ESCAPE
;
2176 inl
->dif_inc_ref_guard_check
|= MLX5_BSF_APPTAG_ESCAPE
;
2179 inl
->dif_app_bitmask_check
=
2180 cpu_to_be16(domain
->sig
.dif
.apptag_check_mask
);
2183 static int mlx5_set_bsf(struct ib_mr
*sig_mr
,
2184 struct ib_sig_attrs
*sig_attrs
,
2185 struct mlx5_bsf
*bsf
, u32 data_size
)
2187 struct mlx5_core_sig_ctx
*msig
= to_mmr(sig_mr
)->sig
;
2188 struct mlx5_bsf_basic
*basic
= &bsf
->basic
;
2189 struct ib_sig_domain
*mem
= &sig_attrs
->mem
;
2190 struct ib_sig_domain
*wire
= &sig_attrs
->wire
;
2192 memset(bsf
, 0, sizeof(*bsf
));
2194 /* Basic + Extended + Inline */
2195 basic
->bsf_size_sbs
= 1 << 7;
2196 /* Input domain check byte mask */
2197 basic
->check_byte_mask
= sig_attrs
->check_mask
;
2198 basic
->raw_data_size
= cpu_to_be32(data_size
);
2201 switch (sig_attrs
->mem
.sig_type
) {
2202 case IB_SIG_TYPE_NONE
:
2204 case IB_SIG_TYPE_T10_DIF
:
2205 basic
->mem
.bs_selector
= bs_selector(mem
->sig
.dif
.pi_interval
);
2206 basic
->m_bfs_psv
= cpu_to_be32(msig
->psv_memory
.psv_idx
);
2207 mlx5_fill_inl_bsf(mem
, &bsf
->m_inl
);
2214 switch (sig_attrs
->wire
.sig_type
) {
2215 case IB_SIG_TYPE_NONE
:
2217 case IB_SIG_TYPE_T10_DIF
:
2218 if (mem
->sig
.dif
.pi_interval
== wire
->sig
.dif
.pi_interval
&&
2219 mem
->sig_type
== wire
->sig_type
) {
2220 /* Same block structure */
2221 basic
->bsf_size_sbs
|= 1 << 4;
2222 if (mem
->sig
.dif
.bg_type
== wire
->sig
.dif
.bg_type
)
2223 basic
->wire
.copy_byte_mask
|= MLX5_CPY_GRD_MASK
;
2224 if (mem
->sig
.dif
.app_tag
== wire
->sig
.dif
.app_tag
)
2225 basic
->wire
.copy_byte_mask
|= MLX5_CPY_APP_MASK
;
2226 if (mem
->sig
.dif
.ref_tag
== wire
->sig
.dif
.ref_tag
)
2227 basic
->wire
.copy_byte_mask
|= MLX5_CPY_REF_MASK
;
2229 basic
->wire
.bs_selector
= bs_selector(wire
->sig
.dif
.pi_interval
);
2231 basic
->w_bfs_psv
= cpu_to_be32(msig
->psv_wire
.psv_idx
);
2232 mlx5_fill_inl_bsf(wire
, &bsf
->w_inl
);
static int set_sig_data_segment(struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
				void **seg, int *size)
{
	struct ib_sig_attrs *sig_attrs = wr->wr.sig_handover.sig_attrs;
	struct ib_mr *sig_mr = wr->wr.sig_handover.sig_mr;
	struct mlx5_bsf *bsf;
	u32 data_len = wr->sg_list->length;
	u32 data_key = wr->sg_list->lkey;
	u64 data_va = wr->sg_list->addr;
	int ret;
	int wqe_size;

	if (!wr->wr.sig_handover.prot ||
	    (data_key == wr->wr.sig_handover.prot->lkey &&
	     data_va == wr->wr.sig_handover.prot->addr &&
	     data_len == wr->wr.sig_handover.prot->length)) {
		/**
		 * Source domain doesn't contain signature information
		 * or data and protection are interleaved in memory.
		 * So need construct:
		 *                  ------------------
		 *                 |     data_klm     |
		 *                  ------------------
		 *                 |       BSF        |
		 *                  ------------------
		 **/
		struct mlx5_klm *data_klm = *seg;

		data_klm->bcount = cpu_to_be32(data_len);
		data_klm->key = cpu_to_be32(data_key);
		data_klm->va = cpu_to_be64(data_va);
		wqe_size = ALIGN(sizeof(*data_klm), 64);
	} else {
		/**
		 * Source domain contains signature information
		 * So need construct a strided block format:
		 *               ---------------------------
		 *              |     stride_block_ctrl     |
		 *               ---------------------------
		 *              |          data_klm         |
		 *               ---------------------------
		 *              |          prot_klm         |
		 *               ---------------------------
		 *              |             BSF           |
		 *               ---------------------------
		 **/
		struct mlx5_stride_block_ctrl_seg *sblock_ctrl;
		struct mlx5_stride_block_entry *data_sentry;
		struct mlx5_stride_block_entry *prot_sentry;
		u32 prot_key = wr->wr.sig_handover.prot->lkey;
		u64 prot_va = wr->wr.sig_handover.prot->addr;
		u16 block_size = sig_attrs->mem.sig.dif.pi_interval;
		int prot_size;

		sblock_ctrl = *seg;
		data_sentry = (void *)sblock_ctrl + sizeof(*sblock_ctrl);
		prot_sentry = (void *)data_sentry + sizeof(*data_sentry);

		prot_size = prot_field_size(sig_attrs->mem.sig_type);
		if (!prot_size) {
			pr_err("Bad block size given: %u\n", block_size);
			return -EINVAL;
		}
		sblock_ctrl->bcount_per_cycle = cpu_to_be32(block_size +
							    prot_size);
		sblock_ctrl->op = cpu_to_be32(MLX5_STRIDE_BLOCK_OP);
		sblock_ctrl->repeat_count = cpu_to_be32(data_len / block_size);
		sblock_ctrl->num_entries = cpu_to_be16(2);

		data_sentry->bcount = cpu_to_be16(block_size);
		data_sentry->key = cpu_to_be32(data_key);
		data_sentry->va = cpu_to_be64(data_va);
		data_sentry->stride = cpu_to_be16(block_size);

		prot_sentry->bcount = cpu_to_be16(prot_size);
		prot_sentry->key = cpu_to_be32(prot_key);
		prot_sentry->va = cpu_to_be64(prot_va);
		prot_sentry->stride = cpu_to_be16(prot_size);

		wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) +
				 sizeof(*prot_sentry), 64);
	}

	*seg += wqe_size;
	*size += wqe_size / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	bsf = *seg;
	ret = mlx5_set_bsf(sig_mr, sig_attrs, bsf, data_len);
	if (ret)
		return -EINVAL;

	*seg += sizeof(*bsf);
	*size += sizeof(*bsf) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	return 0;
}

static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
				 struct ib_send_wr *wr, u32 nelements,
				 u32 length, u32 pdn)
{
	struct ib_mr *sig_mr = wr->wr.sig_handover.sig_mr;
	u32 sig_key = sig_mr->rkey;
	u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1;

	memset(seg, 0, sizeof(*seg));

	seg->flags = get_umr_flags(wr->wr.sig_handover.access_flags) |
		     MLX5_ACCESS_MODE_KLM;
	seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
	seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
				    MLX5_MKEY_BSF_EN | pdn);
	seg->len = cpu_to_be64(length);
	seg->xlt_oct_size = cpu_to_be32(be16_to_cpu(get_klm_octo(nelements)));
	seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
}

static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
				struct ib_send_wr *wr, u32 nelements)
{
	memset(umr, 0, sizeof(*umr));

	umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE;
	umr->klm_octowords = get_klm_octo(nelements);
	umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE);
	umr->mkey_mask = sig_mkey_mask();
}

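/*
 * Build the UMR part of an IB_WR_REG_SIG_MR work request: a UMR control
 * segment and a signature-enabled mkey segment covering the protected
 * region (data plus any separate protection buffer), followed by the
 * data/BSF layout produced by set_sig_data_segment().
 */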
static int set_sig_umr_wr(struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
			  void **seg, int *size)
{
	struct mlx5_ib_mr *sig_mr = to_mmr(wr->wr.sig_handover.sig_mr);
	u32 pdn = get_pd(qp)->pdn;
	u32 klm_oct_size;
	int region_len, ret;

	if (unlikely(wr->num_sge != 1) ||
	    unlikely(wr->wr.sig_handover.access_flags &
		     IB_ACCESS_REMOTE_ATOMIC) ||
	    unlikely(!sig_mr->sig) || unlikely(!qp->signature_en) ||
	    unlikely(!sig_mr->sig->sig_status_checked))
		return -EINVAL;

	/* length of the protected region, data + protection */
	region_len = wr->sg_list->length;
	if (wr->wr.sig_handover.prot &&
	    (wr->wr.sig_handover.prot->lkey != wr->sg_list->lkey  ||
	     wr->wr.sig_handover.prot->addr != wr->sg_list->addr  ||
	     wr->wr.sig_handover.prot->length != wr->sg_list->length))
		region_len += wr->wr.sig_handover.prot->length;

	/**
	 * KLM octoword size - if protection was provided
	 * then we use strided block format (3 octowords),
	 * else we use single KLM (1 octoword)
	 **/
	klm_oct_size = wr->wr.sig_handover.prot ? 3 : 1;

	set_sig_umr_segment(*seg, wr, klm_oct_size);
	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	set_sig_mkey_segment(*seg, wr, klm_oct_size, region_len, pdn);
	*seg += sizeof(struct mlx5_mkey_seg);
	*size += sizeof(struct mlx5_mkey_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	ret = set_sig_data_segment(wr, qp, seg, size);
	if (ret)
		return ret;

	sig_mr->sig->sig_status_checked = false;
	return 0;
}

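/*
 * Build a SET_PSV segment that primes one Protection Status Vector
 * (memory or wire domain) with the initial T10-DIF guard/app/ref values.
 */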
static int set_psv_wr(struct ib_sig_domain *domain,
		      u32 psv_idx, void **seg, int *size)
{
	struct mlx5_seg_set_psv *psv_seg = *seg;

	memset(psv_seg, 0, sizeof(*psv_seg));
	psv_seg->psv_num = cpu_to_be32(psv_idx);
	switch (domain->sig_type) {
	case IB_SIG_TYPE_NONE:
		break;
	case IB_SIG_TYPE_T10_DIF:
		psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 |
						     domain->sig.dif.app_tag);
		psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag);
		break;
	default:
		pr_err("Bad signature type given.\n");
		return 1;
	}

	*seg += sizeof(*psv_seg);
	*size += sizeof(*psv_seg) / 16;

	return 0;
}

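/*
 * Build a fast-register or local-invalidate WQE: a UMR control segment,
 * an mkey segment and, for fast registration only, a data segment that
 * describes the page list being registered.
 */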
static int set_frwr_li_wr(void **seg, struct ib_send_wr *wr, int *size,
			  struct mlx5_core_dev *mdev, struct mlx5_ib_pd *pd, struct mlx5_ib_qp *qp)
{
	int writ = 0;
	int li;

	li = wr->opcode == IB_WR_LOCAL_INV ? 1 : 0;
	if (unlikely(wr->send_flags & IB_SEND_INLINE))
		return -EINVAL;

	set_frwr_umr_segment(*seg, wr, li);
	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);
	set_mkey_segment(*seg, wr, li, &writ);
	*seg += sizeof(struct mlx5_mkey_seg);
	*size += sizeof(struct mlx5_mkey_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	if (!li) {
		if (unlikely(wr->wr.fast_reg.page_list_len >
			     wr->wr.fast_reg.page_list->max_page_list_len))
			return -ENOMEM;

		set_frwr_pages(*seg, wr, mdev, pd, writ);
		*seg += sizeof(struct mlx5_wqe_data_seg);
		*size += (sizeof(struct mlx5_wqe_data_seg) / 16);
	}
	return 0;
}

static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
{
	__be32 *p = NULL;
	int tidx = idx;
	int i, j;

	pr_debug("dump wqe at %p\n", mlx5_get_send_wqe(qp, tidx));
	for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
		if ((i & 0xf) == 0) {
			void *buf = mlx5_get_send_wqe(qp, tidx);
			tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1);
			p = buf;
			j = 0;
		}
		pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
			 be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
			 be32_to_cpu(p[j + 3]));
	}
}

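/*
 * Copy a WQE into the BlueFlame register 64 bytes per loop iteration,
 * wrapping back to the start of the send queue buffer when the source
 * pointer reaches the end of the queue.
 */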
static void mlx5_bf_copy(u64 __iomem *dst, u64 *src,
			 unsigned bytecnt, struct mlx5_ib_qp *qp)
{
	while (bytecnt > 0) {
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		bytecnt -= 64;
		if (unlikely(src == qp->sq.qend))
			src = mlx5_get_send_wqe(qp, 0);
	}
}

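/*
 * Work out the fence mode for this WR from the requested IB_SEND_FENCE
 * flag and the fence state cached from previous WRs on this QP.
 */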
static u8 get_fence(u8 fence, struct ib_send_wr *wr)
{
	if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
		     wr->send_flags & IB_SEND_FENCE))
		return MLX5_FENCE_MODE_STRONG_ORDERING;

	if (unlikely(fence)) {
		if (wr->send_flags & IB_SEND_FENCE)
			return MLX5_FENCE_MODE_SMALL_AND_FENCE;
		else
			return fence;
	} else {
		return 0;
	}
}

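/*
 * Reserve the next send WQE slot: check for SQ overflow, locate the WQE
 * by cur_post, and fill the control segment's immediate field and
 * completion/solicited flags for this work request.
 */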
static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
		     struct mlx5_wqe_ctrl_seg **ctrl,
		     struct ib_send_wr *wr, unsigned *idx,
		     int *size, int nreq)
{
	int err = 0;

	if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) {
		err = -ENOMEM;
		return err;
	}

	*idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
	*seg = mlx5_get_send_wqe(qp, *idx);
	*ctrl = *seg;
	*(uint32_t *)(*seg + 8) = 0;
	(*ctrl)->imm = send_ieth(wr);
	(*ctrl)->fm_ce_se = qp->sq_signal_bits |
		(wr->send_flags & IB_SEND_SIGNALED ?
		 MLX5_WQE_CTRL_CQ_UPDATE : 0) |
		(wr->send_flags & IB_SEND_SOLICITED ?
		 MLX5_WQE_CTRL_SOLICITED : 0);

	*seg += sizeof(**ctrl);
	*size = sizeof(**ctrl) / 16;

	return err;
}

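/*
 * Finalize a WQE: write the opcode/index and ds count into the control
 * segment, record wr_id and wqe_head bookkeeping, and advance cur_post
 * by the number of send WQE basic blocks the WQE consumed.
 */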
static void finish_wqe(struct mlx5_ib_qp *qp,
		       struct mlx5_wqe_ctrl_seg *ctrl,
		       u8 size, unsigned idx, u64 wr_id,
		       int nreq, u8 fence, u8 next_fence,
		       u32 mlx5_opcode)
{
	u8 opmod = 0;

	ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
					     mlx5_opcode | ((u32)opmod << 24));
	ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8));
	ctrl->fm_ce_se |= fence;
	qp->fm_cache = next_fence;
	if (unlikely(qp->wq_sig))
		ctrl->signature = wq_sig(ctrl);

	qp->sq.wrid[idx] = wr_id;
	qp->sq.w_list[idx].opcode = mlx5_opcode;
	qp->sq.wqe_head[idx] = qp->sq.head + nreq;
	qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
	qp->sq.w_list[idx].next = qp->sq.cur_post;
}

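/*
 * Post a chain of send work requests. Each WR is translated into the
 * transport- and opcode-specific segments (remote address, datagram, UMR,
 * signature, inline or pointer data), finalized with finish_wqe(), and at
 * the end the doorbell record and doorbell register are updated under the
 * UAR/BlueFlame lock.
 */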
int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr)
{
	struct mlx5_wqe_ctrl_seg *ctrl = NULL;  /* compiler warning */
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_ib_mr *mr;
	struct mlx5_wqe_data_seg *dpseg;
	struct mlx5_wqe_xrc_seg *xrc;
	struct mlx5_bf *bf = qp->bf;
	int uninitialized_var(size);
	void *qend = qp->sq.qend;
	unsigned long flags;
	unsigned idx;
	int err = 0;
	int inl = 0;
	int num_sge;
	void *seg;
	int nreq;
	int i;
	u8 next_fence = 0;
	u8 fence;

	spin_lock_irqsave(&qp->sq.lock, flags);

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
			mlx5_ib_warn(dev, "\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		fence = qp->fm_cache;
		num_sge = wr->num_sge;
		if (unlikely(num_sge > qp->sq.max_gs)) {
			mlx5_ib_warn(dev, "\n");
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq);
		if (err) {
			mlx5_ib_warn(dev, "\n");
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		switch (ibqp->qp_type) {
		case IB_QPT_XRC_INI:
			xrc = seg;
			xrc->xrc_srqn = htonl(wr->xrc_remote_srq_num);
			seg += sizeof(*xrc);
			size += sizeof(*xrc) / 16;
			/* fall through */
		case IB_QPT_RC:
			switch (wr->opcode) {
			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(seg, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				seg += sizeof(struct mlx5_wqe_raddr_seg);
				size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
				break;

			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
			case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
				mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
				err = -ENOSYS;
				*bad_wr = wr;
				goto out;

			case IB_WR_LOCAL_INV:
				next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
				qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
				ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
				err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					*bad_wr = wr;
					goto out;
				}
				num_sge = 0;
				break;

			case IB_WR_FAST_REG_MR:
				next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
				qp->sq.wr_data[idx] = IB_WR_FAST_REG_MR;
				ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey);
				err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					*bad_wr = wr;
					goto out;
				}
				num_sge = 0;
				break;

			case IB_WR_REG_SIG_MR:
				qp->sq.wr_data[idx] = IB_WR_REG_SIG_MR;
				mr = to_mmr(wr->wr.sig_handover.sig_mr);

				ctrl->imm = cpu_to_be32(mr->ibmr.rkey);
				err = set_sig_umr_wr(wr, qp, &seg, &size);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					*bad_wr = wr;
					goto out;
				}

				finish_wqe(qp, ctrl, size, idx, wr->wr_id,
					   nreq, get_fence(fence, wr),
					   next_fence, MLX5_OPCODE_UMR);
				/*
				 * SET_PSV WQEs are not signaled and solicited
				 * on error
				 */
				wr->send_flags &= ~IB_SEND_SIGNALED;
				wr->send_flags |= IB_SEND_SOLICITED;
				err = begin_wqe(qp, &seg, &ctrl, wr,
						&idx, &size, nreq);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					err = -ENOMEM;
					*bad_wr = wr;
					goto out;
				}

				err = set_psv_wr(&wr->wr.sig_handover.sig_attrs->mem,
						 mr->sig->psv_memory.psv_idx, &seg,
						 &size);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					*bad_wr = wr;
					goto out;
				}

				finish_wqe(qp, ctrl, size, idx, wr->wr_id,
					   nreq, get_fence(fence, wr),
					   next_fence, MLX5_OPCODE_SET_PSV);
				err = begin_wqe(qp, &seg, &ctrl, wr,
						&idx, &size, nreq);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					err = -ENOMEM;
					*bad_wr = wr;
					goto out;
				}

				next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
				err = set_psv_wr(&wr->wr.sig_handover.sig_attrs->wire,
						 mr->sig->psv_wire.psv_idx, &seg,
						 &size);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					*bad_wr = wr;
					goto out;
				}

				finish_wqe(qp, ctrl, size, idx, wr->wr_id,
					   nreq, get_fence(fence, wr),
					   next_fence, MLX5_OPCODE_SET_PSV);
				num_sge = 0;
				goto skip_psv;

			default:
				break;
			}
			break;

		case IB_QPT_UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(seg, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				seg += sizeof(struct mlx5_wqe_raddr_seg);
				size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
				break;

			default:
				break;
			}
			break;

		case IB_QPT_SMI:
		case IB_QPT_GSI:
		case IB_QPT_UD:
			set_datagram_seg(seg, wr);
			seg += sizeof(struct mlx5_wqe_datagram_seg);
			size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
			if (unlikely((seg == qend)))
				seg = mlx5_get_send_wqe(qp, 0);
			break;

		case MLX5_IB_QPT_REG_UMR:
			if (wr->opcode != MLX5_IB_WR_UMR) {
				err = -EINVAL;
				mlx5_ib_warn(dev, "bad opcode\n");
				goto out;
			}
			qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
			ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey);
			set_reg_umr_segment(seg, wr);
			seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
			size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
			if (unlikely((seg == qend)))
				seg = mlx5_get_send_wqe(qp, 0);
			set_reg_mkey_segment(seg, wr);
			seg += sizeof(struct mlx5_mkey_seg);
			size += sizeof(struct mlx5_mkey_seg) / 16;
			if (unlikely((seg == qend)))
				seg = mlx5_get_send_wqe(qp, 0);
			break;

		default:
			break;
		}

		if (wr->send_flags & IB_SEND_INLINE && num_sge) {
			int uninitialized_var(sz);

			err = set_data_inl_seg(qp, wr, seg, &sz);
			if (unlikely(err)) {
				mlx5_ib_warn(dev, "\n");
				*bad_wr = wr;
				goto out;
			}
			inl = 1;
			size += sz;
		} else {
			dpseg = seg;
			for (i = 0; i < num_sge; i++) {
				if (unlikely(dpseg == qend)) {
					seg = mlx5_get_send_wqe(qp, 0);
					dpseg = seg;
				}
				if (likely(wr->sg_list[i].length)) {
					set_data_ptr_seg(dpseg, wr->sg_list + i);
					size += sizeof(struct mlx5_wqe_data_seg) / 16;
					dpseg++;
				}
			}
		}

		finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
			   get_fence(fence, wr), next_fence,
			   mlx5_ib_opcode[wr->opcode]);
skip_psv:
		if (0)
			dump_wqe(qp, idx, size);
	}

out:
	if (likely(nreq)) {
		qp->sq.head += nreq;

		/* Make sure that descriptors are written before
		 * updating doorbell record and ringing the doorbell
		 */
		wmb();

		qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);

		/* Make sure doorbell record is visible to the HCA before
		 * we hit doorbell */
		wmb();

		if (bf->need_lock)
			spin_lock(&bf->lock);
		else
			__acquire(&bf->lock);

		if (0 && nreq == 1 && bf->uuarn && inl && size > 1 && size <= bf->buf_size / 16) {
			mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl, ALIGN(size * 16, 64), qp);
		} else {
			mlx5_write64((__be32 *)ctrl, bf->regreg + bf->offset,
				     MLX5_GET_DOORBELL_LOCK(&bf->lock32));
			/* Make sure doorbells don't leak out of SQ spinlock
			 * and reach the HCA out of order.
			 */
			mmiowb();
		}
		bf->offset ^= bf->buf_size;
		if (bf->need_lock)
			spin_unlock(&bf->lock);
		else
			__release(&bf->lock);
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return err;
}

static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)
{
	sig->signature = calc_sig(sig, size);
}

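/*
 * Post a chain of receive work requests: scatter entries are written to
 * consecutive RQ WQEs (terminated with an invalid-lkey entry when shorter
 * than max_gs), optionally prefixed by a WQE signature, and the receive
 * doorbell record is updated once at the end.
 */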
int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_wqe_data_seg *scat;
	struct mlx5_rwqe_sig *sig;
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int i;

	spin_lock_irqsave(&qp->rq.lock, flags);

	ind = qp->rq.head & (qp->rq.wqe_cnt - 1);

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		scat = get_recv_wqe(qp, ind);
		if (qp->wq_sig)
			scat++;

		for (i = 0; i < wr->num_sge; i++)
			set_data_ptr_seg(scat + i, wr->sg_list + i);

		if (i < qp->rq.max_gs) {
			scat[i].byte_count = 0;
			scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
			scat[i].addr = 0;
		}

		if (qp->wq_sig) {
			sig = (struct mlx5_rwqe_sig *)scat;
			set_sig_seg(sig, (qp->rq.max_gs + 1) << 2);
		}

		qp->rq.wrid[ind] = wr->wr_id;

		ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
	}

out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/* Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);

	return err;
}

static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
{
	switch (mlx5_state) {
	case MLX5_QP_STATE_RST:      return IB_QPS_RESET;
	case MLX5_QP_STATE_INIT:     return IB_QPS_INIT;
	case MLX5_QP_STATE_RTR:      return IB_QPS_RTR;
	case MLX5_QP_STATE_RTS:      return IB_QPS_RTS;
	case MLX5_QP_STATE_SQ_DRAINING:
	case MLX5_QP_STATE_SQD:      return IB_QPS_SQD;
	case MLX5_QP_STATE_SQER:     return IB_QPS_SQE;
	case MLX5_QP_STATE_ERR:      return IB_QPS_ERR;
	default:		     return -1;
	}
}

static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state)
{
	switch (mlx5_mig_state) {
	case MLX5_QP_PM_ARMED:		return IB_MIG_ARMED;
	case MLX5_QP_PM_REARM:		return IB_MIG_REARM;
	case MLX5_QP_PM_MIGRATED:	return IB_MIG_MIGRATED;
	default:			return -1;
	}
}

static int to_ib_qp_access_flags(int mlx5_flags)
{
	int ib_flags = 0;

	if (mlx5_flags & MLX5_QP_BIT_RRE)
		ib_flags |= IB_ACCESS_REMOTE_READ;
	if (mlx5_flags & MLX5_QP_BIT_RWE)
		ib_flags |= IB_ACCESS_REMOTE_WRITE;
	if (mlx5_flags & MLX5_QP_BIT_RAE)
		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

	return ib_flags;
}

static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
			  struct mlx5_qp_path *path)
{
	struct mlx5_core_dev *dev = ibdev->mdev;

	memset(ib_ah_attr, 0, sizeof(*ib_ah_attr));
	ib_ah_attr->port_num = path->port;

	if (ib_ah_attr->port_num == 0 ||
	    ib_ah_attr->port_num > dev->caps.gen.num_ports)
		return;

	ib_ah_attr->sl = path->sl & 0xf;

	ib_ah_attr->dlid = be16_to_cpu(path->rlid);
	ib_ah_attr->src_path_bits = path->grh_mlid & 0x7f;
	ib_ah_attr->static_rate = path->static_rate ? path->static_rate - 5 : 0;
	ib_ah_attr->ah_flags = (path->grh_mlid & (1 << 7)) ? IB_AH_GRH : 0;
	if (ib_ah_attr->ah_flags) {
		ib_ah_attr->grh.sgid_index = path->mgid_index;
		ib_ah_attr->grh.hop_limit = path->hop_limit;
		ib_ah_attr->grh.traffic_class =
			(be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
		ib_ah_attr->grh.flow_label =
			be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
		memcpy(ib_ah_attr->grh.dgid.raw,
		       path->rgid, sizeof(ib_ah_attr->grh.dgid.raw));
	}
}

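/*
 * Query the firmware QP context and translate it back into ib_qp_attr /
 * ib_qp_init_attr values: state, path and timeout parameters, atomic
 * limits and the queue capabilities visible to the caller.
 */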
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_query_qp_mbox_out *outb;
	struct mlx5_qp_context *context;
	int mlx5_state;
	int err = 0;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/*
	 * Wait for any outstanding page faults, in case the user frees memory
	 * based upon this query's result.
	 */
	flush_workqueue(mlx5_ib_page_fault_wq);
#endif

	mutex_lock(&qp->mutex);
	outb = kzalloc(sizeof(*outb), GFP_KERNEL);
	if (!outb) {
		err = -ENOMEM;
		goto out;
	}
	context = &outb->ctx;
	err = mlx5_core_qp_query(dev->mdev, &qp->mqp, outb, sizeof(*outb));
	if (err)
		goto out_free;

	mlx5_state = be32_to_cpu(context->flags) >> 28;

	qp->state = to_ib_qp_state(mlx5_state);
	qp_attr->qp_state = qp->state;
	qp_attr->path_mtu = context->mtu_msgmax >> 5;
	qp_attr->path_mig_state =
		to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
	qp_attr->qkey = be32_to_cpu(context->qkey);
	qp_attr->rq_psn = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
	qp_attr->sq_psn = be32_to_cpu(context->next_send_psn) & 0xffffff;
	qp_attr->dest_qp_num = be32_to_cpu(context->log_pg_sz_remote_qpn) & 0xffffff;
	qp_attr->qp_access_flags =
		to_ib_qp_access_flags(be32_to_cpu(context->params2));

	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
		to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
		qp_attr->alt_pkey_index = context->alt_path.pkey_index & 0x7f;
		qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
	}

	qp_attr->pkey_index = context->pri_path.pkey_index & 0x7f;
	qp_attr->port_num = context->pri_path.port;

	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
	qp_attr->sq_draining = mlx5_state == MLX5_QP_STATE_SQ_DRAINING;

	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);

	qp_attr->max_dest_rd_atomic =
		1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
	qp_attr->min_rnr_timer =
		(be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
	qp_attr->timeout = context->pri_path.ackto_lt >> 3;
	qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7;
	qp_attr->rnr_retry = (be32_to_cpu(context->params1) >> 13) & 0x7;
	qp_attr->alt_timeout = context->alt_path.ackto_lt >> 3;
	qp_attr->cur_qp_state = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge = qp->rq.max_gs;

	if (!ibqp->uobject) {
		qp_attr->cap.max_send_wr = qp->sq.wqe_cnt;
		qp_attr->cap.max_send_sge = qp->sq.max_gs;
	} else {
		qp_attr->cap.max_send_wr = 0;
		qp_attr->cap.max_send_sge = 0;
	}

	/* We don't support inline sends for kernel QPs (yet), and we
	 * don't know what userspace's value should be.
	 */
	qp_attr->cap.max_inline_data = 0;

	qp_init_attr->cap = qp_attr->cap;

	qp_init_attr->create_flags = 0;
	if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
		qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;

	qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
		IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;

out_free:
	kfree(outb);

out:
	mutex_unlock(&qp->mutex);
	return err;
}

struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_general_caps *gen;
	struct mlx5_ib_xrcd *xrcd;
	int err;

	gen = &dev->mdev->caps.gen;
	if (!(gen->flags & MLX5_DEV_CAP_FLAG_XRC))
		return ERR_PTR(-ENOSYS);

	xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
	if (!xrcd)
		return ERR_PTR(-ENOMEM);

	err = mlx5_core_xrcd_alloc(dev->mdev, &xrcd->xrcdn);
	if (err) {
		kfree(xrcd);
		return ERR_PTR(-ENOMEM);
	}

	return &xrcd->ibxrcd;
}

int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
	u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
	int err;

	err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn);
	if (err) {
		mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
		return err;
	}

	kfree(xrcd);

	return 0;
}