/*
 * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <rdma/ib_umem.h>
#include "mlx5_ib.h"

/* not supported currently */
static int wq_signature;
enum {
	MLX5_IB_ACK_REQ_FREQ	= 8,
};

enum {
	MLX5_IB_DEFAULT_SCHED_QUEUE	= 0x83,
	MLX5_IB_DEFAULT_QP0_SCHED_QUEUE	= 0x3f,
	MLX5_IB_LINK_TYPE_IB		= 0,
	MLX5_IB_LINK_TYPE_ETH		= 1
};

enum {
	MLX5_IB_SQ_STRIDE	= 6,
	MLX5_IB_CACHE_LINE_SIZE	= 64,
};
static const u32 mlx5_ib_opcode[] = {
	[IB_WR_SEND]				= MLX5_OPCODE_SEND,
	[IB_WR_SEND_WITH_IMM]			= MLX5_OPCODE_SEND_IMM,
	[IB_WR_RDMA_WRITE]			= MLX5_OPCODE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM]		= MLX5_OPCODE_RDMA_WRITE_IMM,
	[IB_WR_RDMA_READ]			= MLX5_OPCODE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP]		= MLX5_OPCODE_ATOMIC_CS,
	[IB_WR_ATOMIC_FETCH_AND_ADD]		= MLX5_OPCODE_ATOMIC_FA,
	[IB_WR_SEND_WITH_INV]			= MLX5_OPCODE_SEND_INVAL,
	[IB_WR_LOCAL_INV]			= MLX5_OPCODE_UMR,
	[IB_WR_FAST_REG_MR]			= MLX5_OPCODE_UMR,
	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP]	= MLX5_OPCODE_ATOMIC_MASKED_CS,
	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]	= MLX5_OPCODE_ATOMIC_MASKED_FA,
	[MLX5_IB_WR_UMR]			= MLX5_OPCODE_UMR,
};
struct umr_wr {
	u64				virt_addr;
	struct ib_pd		       *pd;
	unsigned int			page_shift;
	unsigned int			npages;
	u32				length;
	int				access_flags;
	u32				mkey;
};
static int is_qp0(enum ib_qp_type qp_type)
{
	return qp_type == IB_QPT_SMI;
}

static int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == IB_QPT_GSI;
}

static int is_sqp(enum ib_qp_type qp_type)
{
	return is_qp0(qp_type) || is_qp1(qp_type);
}

static void *get_wqe(struct mlx5_ib_qp *qp, int offset)
{
	return mlx5_buf_offset(&qp->buf, offset);
}

static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
}

void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));
}
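/*
 * mlx5_ib_qp_event() runs from the mlx5_core async event path.  It
 * translates a firmware QP event into the matching ib_event code and
 * delivers it to the consumer's event handler, updating the active
 * port first when the event signals a completed path migration.
 */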
static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
{
	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
	struct ib_event event;

	if (type == MLX5_EVENT_TYPE_PATH_MIG)
		to_mibqp(qp)->port = to_mibqp(qp)->alt_port;

	if (ibqp->event_handler) {
		event.device     = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case MLX5_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case MLX5_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case MLX5_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn);
			return;
		}

		ibqp->event_handler(&event, ibqp->qp_context);
	}
}
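/*
 * Size the receive queue.  For user QPs the geometry comes from the
 * ucmd; for kernel QPs it is derived from the requested capabilities,
 * with the WQE size rounded up to a power of two and checked against
 * the device's maximum RQ descriptor size.
 */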
static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
		       int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
{
	struct mlx5_general_caps *gen;
	int wqe_size;
	int wq_size;

	gen = &dev->mdev->caps.gen;
	/* Sanity check RQ size before proceeding */
	if (cap->max_recv_wr > gen->max_wqes)
		return -EINVAL;

	if (!has_rq) {
		qp->rq.max_gs = 0;
		qp->rq.wqe_cnt = 0;
		qp->rq.wqe_shift = 0;
	} else {
		if (ucmd) {
			qp->rq.wqe_cnt = ucmd->rq_wqe_count;
			qp->rq.wqe_shift = ucmd->rq_wqe_shift;
			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
			qp->rq.max_post = qp->rq.wqe_cnt;
		} else {
			wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0;
			wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
			wqe_size = roundup_pow_of_two(wqe_size);
			wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
			wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
			qp->rq.wqe_cnt = wq_size / wqe_size;
			if (wqe_size > gen->max_rq_desc_sz) {
				mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
					    wqe_size,
					    gen->max_rq_desc_sz);
				return -EINVAL;
			}
			qp->rq.wqe_shift = ilog2(wqe_size);
			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
			qp->rq.max_post = qp->rq.wqe_cnt;
		}
	}

	return 0;
}
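/*
 * Per-WQE overhead on the send queue depends on the transport: RC and
 * XRC initiator WQEs must leave room for atomic and remote-address
 * segments, datagram transports carry an address vector, and UMR QPs
 * carry the UMR control and mkey segments.
 */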
static int sq_overhead(enum ib_qp_type qp_type)
{
	int size = 0;

	switch (qp_type) {
	case IB_QPT_XRC_INI:
		size += sizeof(struct mlx5_wqe_xrc_seg);
		/* fall through */
	case IB_QPT_RC:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_atomic_seg) +
			sizeof(struct mlx5_wqe_raddr_seg);
		break;

	case IB_QPT_XRC_TGT:
		return 0;

	case IB_QPT_UC:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_raddr_seg) +
			sizeof(struct mlx5_wqe_umr_ctrl_seg) +
			sizeof(struct mlx5_mkey_seg);
		break;

	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_datagram_seg);
		break;

	case MLX5_IB_QPT_REG_UMR:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_umr_ctrl_seg) +
			sizeof(struct mlx5_mkey_seg);
		break;

	default:
		return -EINVAL;
	}

	return size;
}
static int calc_send_wqe(struct ib_qp_init_attr *attr)
{
	int inl_size = 0;
	int size;

	size = sq_overhead(attr->qp_type);
	if (size < 0)
		return size;

	if (attr->cap.max_inline_data) {
		inl_size = size + sizeof(struct mlx5_wqe_inline_seg) +
			attr->cap.max_inline_data;
	}

	size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);
	if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN &&
	    ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB) < MLX5_SIG_WQE_SIZE)
		return MLX5_SIG_WQE_SIZE;
	else
		return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
}
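/*
 * Derive the send queue geometry from the requested capabilities: the
 * largest WQE determines max_inline_data, the total WQ size is rounded
 * up to a power of two, and the WQE count is expressed in basic blocks
 * (MLX5_SEND_WQE_BB) for the hardware.
 */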
static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
			struct mlx5_ib_qp *qp)
{
	struct mlx5_general_caps *gen;
	int wqe_size;
	int wq_size;

	gen = &dev->mdev->caps.gen;
	if (!attr->cap.max_send_wr)
		return 0;

	wqe_size = calc_send_wqe(attr);
	mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size);
	if (wqe_size < 0)
		return wqe_size;

	if (wqe_size > gen->max_sq_desc_sz) {
		mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
			    wqe_size, gen->max_sq_desc_sz);
		return -EINVAL;
	}

	qp->max_inline_data = wqe_size - sq_overhead(attr->qp_type) -
		sizeof(struct mlx5_wqe_inline_seg);
	attr->cap.max_inline_data = qp->max_inline_data;

	if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN)
		qp->signature_en = true;

	wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
	qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
	if (qp->sq.wqe_cnt > gen->max_wqes) {
		mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
			    qp->sq.wqe_cnt, gen->max_wqes);
		return -ENOMEM;
	}
	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
	qp->sq.max_gs = attr->cap.max_send_sge;
	qp->sq.max_post = wq_size / wqe_size;
	attr->cap.max_send_wr = qp->sq.max_post;

	return wq_size;
}
static int set_user_buf_size(struct mlx5_ib_dev *dev,
			     struct mlx5_ib_qp *qp,
			     struct mlx5_ib_create_qp *ucmd)
{
	struct mlx5_general_caps *gen;
	int desc_sz = 1 << qp->sq.wqe_shift;

	gen = &dev->mdev->caps.gen;
	if (desc_sz > gen->max_sq_desc_sz) {
		mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
			     desc_sz, gen->max_sq_desc_sz);
		return -EINVAL;
	}

	if (ucmd->sq_wqe_count && ((1 << ilog2(ucmd->sq_wqe_count)) != ucmd->sq_wqe_count)) {
		mlx5_ib_warn(dev, "sq_wqe_count %d, sq_wqe_count %d\n",
			     ucmd->sq_wqe_count, ucmd->sq_wqe_count);
		return -EINVAL;
	}

	qp->sq.wqe_cnt = ucmd->sq_wqe_count;

	if (qp->sq.wqe_cnt > gen->max_wqes) {
		mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
			     qp->sq.wqe_cnt, gen->max_wqes);
		return -EINVAL;
	}

	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
		(qp->sq.wqe_cnt << 6);

	return 0;
}
static int qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI ||
	    attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
	    attr->qp_type == MLX5_IB_QPT_REG_UMR ||
	    !attr->cap.max_recv_wr)
		return 0;

	return 1;
}
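/*
 * UUAR (micro UAR) bookkeeping.  Each UAR page exposes several
 * doorbell registers: uuar 0 is the shared low-latency register, a
 * range of medium class uuars follows, and the remainder are dedicated
 * high class (BlueFlame) uuars.  The helpers below walk that layout,
 * with next_uuar() skipping the slots reserved for the fast path.
 */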
static int first_med_uuar(void)
{
	return 1;
}

static int next_uuar(int n)
{
	n++;

	while (((n % 4) & 2))
		n++;

	return n;
}

static int num_med_uuar(struct mlx5_uuar_info *uuari)
{
	int n;

	n = uuari->num_uars * MLX5_NON_FP_BF_REGS_PER_PAGE -
		uuari->num_low_latency_uuars - 1;

	return n >= 0 ? n : 0;
}

static int max_uuari(struct mlx5_uuar_info *uuari)
{
	return uuari->num_uars * 4;
}

static int first_hi_uuar(struct mlx5_uuar_info *uuari)
{
	int med;
	int i;
	int t;

	med = num_med_uuar(uuari);
	for (t = 0, i = first_med_uuar();; i = next_uuar(i)) {
		t++;
		if (t == med)
			return next_uuar(i);
	}

	return 0;
}
static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
{
	int i;

	for (i = first_hi_uuar(uuari); i < max_uuari(uuari); i = next_uuar(i)) {
		if (!test_bit(i, uuari->bitmap)) {
			set_bit(i, uuari->bitmap);
			uuari->count[i]++;
			return i;
		}
	}

	return -ENOMEM;
}

static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari)
{
	int minidx = first_med_uuar();
	int i;

	for (i = first_med_uuar(); i < first_hi_uuar(uuari); i = next_uuar(i)) {
		if (uuari->count[i] < uuari->count[minidx])
			minidx = i;
	}

	uuari->count[minidx]++;
	return minidx;
}
static int alloc_uuar(struct mlx5_uuar_info *uuari,
		      enum mlx5_ib_latency_class lat)
{
	int uuarn = -EINVAL;

	mutex_lock(&uuari->lock);
	switch (lat) {
	case MLX5_IB_LATENCY_CLASS_LOW:
		uuarn = 0;
		uuari->count[uuarn]++;
		break;

	case MLX5_IB_LATENCY_CLASS_MEDIUM:
		uuarn = alloc_med_class_uuar(uuari);
		break;

	case MLX5_IB_LATENCY_CLASS_HIGH:
		uuarn = alloc_high_class_uuar(uuari);
		break;

	case MLX5_IB_LATENCY_CLASS_FAST_PATH:
		uuarn = 2;
		break;
	}
	mutex_unlock(&uuari->lock);

	return uuarn;
}
static void free_med_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
	clear_bit(uuarn, uuari->bitmap);
	--uuari->count[uuarn];
}

static void free_high_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
	clear_bit(uuarn, uuari->bitmap);
	--uuari->count[uuarn];
}

static void free_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
	int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
	int high_uuar = nuuars - uuari->num_low_latency_uuars;

	mutex_lock(&uuari->lock);
	if (!uuarn) {
		--uuari->count[uuarn];
		goto out;
	}

	if (uuarn < high_uuar) {
		free_med_class_uuar(uuari, uuarn);
		goto out;
	}

	free_high_class_uuar(uuari, uuarn);

out:
	mutex_unlock(&uuari->lock);
}
static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:	return MLX5_QP_STATE_RST;
	case IB_QPS_INIT:	return MLX5_QP_STATE_INIT;
	case IB_QPS_RTR:	return MLX5_QP_STATE_RTR;
	case IB_QPS_RTS:	return MLX5_QP_STATE_RTS;
	case IB_QPS_SQD:	return MLX5_QP_STATE_SQD;
	case IB_QPS_SQE:	return MLX5_QP_STATE_SQER;
	case IB_QPS_ERR:	return MLX5_QP_STATE_ERR;
	default:		return -1;
	}
}
static int to_mlx5_st(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_RC:			return MLX5_QP_ST_RC;
	case IB_QPT_UC:			return MLX5_QP_ST_UC;
	case IB_QPT_UD:			return MLX5_QP_ST_UD;
	case MLX5_IB_QPT_REG_UMR:	return MLX5_QP_ST_REG_UMR;
	case IB_QPT_XRC_INI:
	case IB_QPT_XRC_TGT:		return MLX5_QP_ST_XRC;
	case IB_QPT_SMI:		return MLX5_QP_ST_QP0;
	case IB_QPT_GSI:		return MLX5_QP_ST_QP1;
	case IB_QPT_RAW_IPV6:		return MLX5_QP_ST_RAW_IPV6;
	case IB_QPT_RAW_ETHERTYPE:	return MLX5_QP_ST_RAW_ETHERTYPE;
	case IB_QPT_RAW_PACKET:
	default:		return -EINVAL;
	}
}
static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn)
{
	return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index;
}
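/*
 * Build a userspace QP: copy the create request from udata, pick a
 * uuar (falling back from high to medium to low latency class), pin
 * the user buffer with ib_umem_get(), and fill the create mailbox with
 * the page list, UAR index and doorbell record supplied by userspace.
 */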
static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			  struct mlx5_ib_qp *qp, struct ib_udata *udata,
			  struct mlx5_create_qp_mbox_in **in,
			  struct mlx5_ib_create_qp_resp *resp, int *inlen)
{
	struct mlx5_ib_ucontext *context;
	struct mlx5_ib_create_qp ucmd;
	int page_shift = 0;
	int uar_index;
	int npages;
	u32 offset = 0;
	int uuarn;
	int ncont = 0;
	int err;

	err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
	if (err) {
		mlx5_ib_dbg(dev, "copy failed\n");
		return err;
	}

	context = to_mucontext(pd->uobject->context);
	/*
	 * TBD: should come from the verbs when we have the API
	 */
	uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH);
	if (uuarn < 0) {
		mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n");
		mlx5_ib_dbg(dev, "reverting to medium latency\n");
		uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_MEDIUM);
		if (uuarn < 0) {
			mlx5_ib_dbg(dev, "failed to allocate medium latency UUAR\n");
			mlx5_ib_dbg(dev, "reverting to high latency\n");
			uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW);
			if (uuarn < 0) {
				mlx5_ib_warn(dev, "uuar allocation failed\n");
				return uuarn;
			}
		}
	}

	uar_index = uuarn_to_uar_index(&context->uuari, uuarn);
	mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index);

	qp->rq.offset = 0;
	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;

	err = set_user_buf_size(dev, qp, &ucmd);
	if (err)
		goto err_uuar;

	if (ucmd.buf_addr && qp->buf_size) {
		qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
				       qp->buf_size, 0, 0);
		if (IS_ERR(qp->umem)) {
			mlx5_ib_dbg(dev, "umem_get failed\n");
			err = PTR_ERR(qp->umem);
			goto err_uuar;
		}
	} else {
		qp->umem = NULL;
	}

	if (qp->umem) {
		mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift,
				   &ncont, NULL);
		err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset);
		if (err) {
			mlx5_ib_warn(dev, "bad offset\n");
			goto err_umem;
		}
		mlx5_ib_dbg(dev, "addr 0x%llx, size %d, npages %d, page_shift %d, ncont %d, offset %d\n",
			    ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset);
	}

	*inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
	*in = mlx5_vzalloc(*inlen);
	if (!*in) {
		err = -ENOMEM;
		goto err_umem;
	}
	if (qp->umem)
		mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0);
	(*in)->ctx.log_pg_sz_remote_qpn =
		cpu_to_be32((page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
	(*in)->ctx.params2 = cpu_to_be32(offset << 6);

	(*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
	resp->uuar_index = uuarn;
	qp->uuarn = uuarn;

	err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db);
	if (err) {
		mlx5_ib_dbg(dev, "map failed\n");
		goto err_free;
	}

	err = ib_copy_to_udata(udata, resp, sizeof(*resp));
	if (err) {
		mlx5_ib_dbg(dev, "copy failed\n");
		goto err_unmap;
	}
	qp->create_type = MLX5_QP_USER;

	return 0;

err_unmap:
	mlx5_ib_db_unmap_user(context, &qp->db);

err_free:
	kvfree(*in);

err_umem:
	if (qp->umem)
		ib_umem_release(qp->umem);

err_uuar:
	free_uuar(&context->uuari, uuarn);
	return err;
}
static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_ucontext *context;

	context = to_mucontext(pd->uobject->context);
	mlx5_ib_db_unmap_user(context, &qp->db);
	if (qp->umem)
		ib_umem_release(qp->umem);
	free_uuar(&context->uuari, qp->uuarn);
}
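/*
 * Kernel QPs allocate everything in-kernel: a uuar from the device's
 * pool (fast path class for the UMR QP), the WQ buffer via
 * mlx5_buf_alloc(), a doorbell record, and the bookkeeping arrays used
 * when posting and polling work requests.
 */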
static int create_kernel_qp(struct mlx5_ib_dev *dev,
			    struct ib_qp_init_attr *init_attr,
			    struct mlx5_ib_qp *qp,
			    struct mlx5_create_qp_mbox_in **in, int *inlen)
{
	enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW;
	struct mlx5_uuar_info *uuari;
	int uar_index;
	int uuarn;
	int err;

	uuari = &dev->mdev->priv.uuari;
	if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK))
		return -EINVAL;

	if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
		lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;

	uuarn = alloc_uuar(uuari, lc);
	if (uuarn < 0) {
		mlx5_ib_dbg(dev, "\n");
		return -ENOMEM;
	}

	qp->bf = &uuari->bfs[uuarn];
	uar_index = qp->bf->uar->index;

	err = calc_sq_size(dev, init_attr, qp);
	if (err < 0) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto err_uuar;
	}

	qp->rq.offset = 0;
	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
	qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);

	err = mlx5_buf_alloc(dev->mdev, qp->buf_size, PAGE_SIZE * 2, &qp->buf);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto err_uuar;
	}

	qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
	*inlen = sizeof(**in) + sizeof(*(*in)->pas) * qp->buf.npages;
	*in = mlx5_vzalloc(*inlen);
	if (!*in) {
		err = -ENOMEM;
		goto err_buf;
	}
	(*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
	(*in)->ctx.log_pg_sz_remote_qpn =
		cpu_to_be32((qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
	/* Set "fast registration enabled" for all kernel QPs */
	(*in)->ctx.params1 |= cpu_to_be32(1 << 11);
	(*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4);

	mlx5_fill_page_array(&qp->buf, (*in)->pas);

	err = mlx5_db_alloc(dev->mdev, &qp->db);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto err_free;
	}

	qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid), GFP_KERNEL);
	qp->sq.wr_data = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data), GFP_KERNEL);
	qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(*qp->rq.wrid), GFP_KERNEL);
	qp->sq.w_list = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.w_list), GFP_KERNEL);
	qp->sq.wqe_head = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wqe_head), GFP_KERNEL);

	if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
	    !qp->sq.w_list || !qp->sq.wqe_head) {
		err = -ENOMEM;
		goto err_wrid;
	}
	qp->create_type = MLX5_QP_KERNEL;

	return 0;

err_wrid:
	mlx5_db_free(dev->mdev, &qp->db);
	kfree(qp->sq.wqe_head);
	kfree(qp->sq.w_list);
	kfree(qp->sq.wrid);
	kfree(qp->sq.wr_data);
	kfree(qp->rq.wrid);

err_free:
	kvfree(*in);

err_buf:
	mlx5_buf_free(dev->mdev, &qp->buf);

err_uuar:
	free_uuar(&dev->mdev->priv.uuari, uuarn);
	return err;
}
static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
	mlx5_db_free(dev->mdev, &qp->db);
	kfree(qp->sq.wqe_head);
	kfree(qp->sq.w_list);
	kfree(qp->sq.wrid);
	kfree(qp->sq.wr_data);
	kfree(qp->rq.wrid);
	mlx5_buf_free(dev->mdev, &qp->buf);
	free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn);
}
static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
{
	if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
	    (attr->qp_type == IB_QPT_XRC_INI))
		return cpu_to_be32(MLX5_SRQ_RQ);
	else if (!qp->has_rq)
		return cpu_to_be32(MLX5_ZERO_LEN_RQ);
	else
		return cpu_to_be32(MLX5_NON_ZERO_RQ);
}
static int is_connected(enum ib_qp_type qp_type)
{
	if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC)
		return 1;

	return 0;
}
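/*
 * Common QP creation path shared by user and kernel QPs: validate the
 * create flags, size both work queues, delegate buffer setup to
 * create_user_qp()/create_kernel_qp(), fill the remaining QP context
 * fields (PD, CQNs, SRQ/XRCD defaults) and issue the firmware create
 * command.
 */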
static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata, struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_resources *devr = &dev->devr;
	struct mlx5_ib_create_qp_resp resp;
	struct mlx5_create_qp_mbox_in *in;
	struct mlx5_general_caps *gen;
	struct mlx5_ib_create_qp ucmd;
	int inlen = sizeof(*in);
	int err;

	gen = &dev->mdev->caps.gen;
	mutex_init(&qp->mutex);
	spin_lock_init(&qp->sq.lock);
	spin_lock_init(&qp->rq.lock);

	if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
		if (!(gen->flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) {
			mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
			return -EINVAL;
		} else {
			qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
		}
	}

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;

	if (pd && pd->uobject) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
			mlx5_ib_dbg(dev, "copy failed\n");
			return -EFAULT;
		}

		qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
		qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
	} else {
		qp->wq_sig = !!wq_signature;
	}

	qp->has_rq = qp_has_rq(init_attr);
	err = set_rq_size(dev, &init_attr->cap, qp->has_rq,
			  qp, (pd && pd->uobject) ? &ucmd : NULL);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		return err;
	}

	if (pd) {
		if (pd->uobject) {
			mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
			if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
			    ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
				mlx5_ib_dbg(dev, "invalid rq params\n");
				return -EINVAL;
			}
			if (ucmd.sq_wqe_count > gen->max_wqes) {
				mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
					    ucmd.sq_wqe_count, gen->max_wqes);
				return -EINVAL;
			}
			err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen);
			if (err)
				mlx5_ib_dbg(dev, "err %d\n", err);
		} else {
			err = create_kernel_qp(dev, init_attr, qp, &in, &inlen);
			if (err)
				mlx5_ib_dbg(dev, "err %d\n", err);
			else
				qp->pa_lkey = to_mpd(pd)->pa_lkey;
		}

		if (err)
			return err;
	} else {
		in = mlx5_vzalloc(sizeof(*in));
		if (!in)
			return -ENOMEM;

		qp->create_type = MLX5_QP_EMPTY;
	}

	if (is_sqp(init_attr->qp_type))
		qp->port = init_attr->port_num;

	in->ctx.flags = cpu_to_be32(to_mlx5_st(init_attr->qp_type) << 16 |
				    MLX5_QP_PM_MIGRATED << 11);

	if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
		in->ctx.flags_pd = cpu_to_be32(to_mpd(pd ? pd : devr->p0)->pdn);
	else
		in->ctx.flags_pd = cpu_to_be32(MLX5_QP_LAT_SENSITIVE);

	if (qp->wq_sig)
		in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_ENABLE_SIG);

	if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
		in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_BLOCK_MCAST);

	if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
		int rcqe_sz;
		int scqe_sz;

		rcqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->recv_cq);
		scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq);

		if (rcqe_sz == 128)
			in->ctx.cs_res = MLX5_RES_SCAT_DATA64_CQE;
		else
			in->ctx.cs_res = MLX5_RES_SCAT_DATA32_CQE;

		if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) {
			if (scqe_sz == 128)
				in->ctx.cs_req = MLX5_REQ_SCAT_DATA64_CQE;
			else
				in->ctx.cs_req = MLX5_REQ_SCAT_DATA32_CQE;
		}
	}

	if (qp->rq.wqe_cnt) {
		in->ctx.rq_size_stride = (qp->rq.wqe_shift - 4);
		in->ctx.rq_size_stride |= ilog2(qp->rq.wqe_cnt) << 3;
	}

	in->ctx.rq_type_srqn = get_rx_type(qp, init_attr);

	if (qp->sq.wqe_cnt)
		in->ctx.sq_crq_size |= cpu_to_be16(ilog2(qp->sq.wqe_cnt) << 11);
	else
		in->ctx.sq_crq_size |= cpu_to_be16(0x8000);

	/* Set default resources */
	switch (init_attr->qp_type) {
	case IB_QPT_XRC_TGT:
		in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
		in->ctx.cqn_send = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
		in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
		in->ctx.xrcd = cpu_to_be32(to_mxrcd(init_attr->xrcd)->xrcdn);
		break;
	case IB_QPT_XRC_INI:
		in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
		in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
		in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
		break;
	default:
		if (init_attr->srq) {
			in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x0)->xrcdn);
			in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn);
		} else {
			in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
			in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
		}
	}

	if (init_attr->send_cq)
		in->ctx.cqn_send = cpu_to_be32(to_mcq(init_attr->send_cq)->mcq.cqn);

	if (init_attr->recv_cq)
		in->ctx.cqn_recv = cpu_to_be32(to_mcq(init_attr->recv_cq)->mcq.cqn);

	in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma);

	err = mlx5_core_create_qp(dev->mdev, &qp->mqp, in, inlen);
	if (err) {
		mlx5_ib_dbg(dev, "create qp failed\n");
		goto err_create;
	}

	kvfree(in);
	/* Hardware wants QPN written in big-endian order (after
	 * shifting) for send doorbell.  Precompute this value to save
	 * a little bit when posting sends.
	 */
	qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);

	qp->mqp.event = mlx5_ib_qp_event;

	return 0;

err_create:
	if (qp->create_type == MLX5_QP_USER)
		destroy_qp_user(pd, qp);
	else if (qp->create_type == MLX5_QP_KERNEL)
		destroy_qp_kernel(dev, qp);

	kvfree(in);
	return err;
}
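/*
 * Lock/unlock both CQs around CQ cleanup.  The locks are always taken
 * in ascending CQN order (with a nested annotation for the second
 * lock) so concurrent destroy paths cannot deadlock, and the same-CQ
 * case takes the lock only once.
 */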
static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq) {
		if (recv_cq) {
			if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
				spin_lock_irq(&send_cq->lock);
				spin_lock_nested(&recv_cq->lock,
						 SINGLE_DEPTH_NESTING);
			} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
				spin_lock_irq(&send_cq->lock);
				__acquire(&recv_cq->lock);
			} else {
				spin_lock_irq(&recv_cq->lock);
				spin_lock_nested(&send_cq->lock,
						 SINGLE_DEPTH_NESTING);
			}
		} else {
			spin_lock_irq(&send_cq->lock);
			__acquire(&recv_cq->lock);
		}
	} else if (recv_cq) {
		spin_lock_irq(&recv_cq->lock);
		__acquire(&send_cq->lock);
	} else {
		__acquire(&send_cq->lock);
		__acquire(&recv_cq->lock);
	}
}
static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
	__releases(&send_cq->lock) __releases(&recv_cq->lock)
{
	if (send_cq) {
		if (recv_cq) {
			if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
				spin_unlock(&recv_cq->lock);
				spin_unlock_irq(&send_cq->lock);
			} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
				__release(&recv_cq->lock);
				spin_unlock_irq(&send_cq->lock);
			} else {
				spin_unlock(&send_cq->lock);
				spin_unlock_irq(&recv_cq->lock);
			}
		} else {
			__release(&recv_cq->lock);
			spin_unlock_irq(&send_cq->lock);
		}
	} else if (recv_cq) {
		__release(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	} else {
		__release(&recv_cq->lock);
		__release(&send_cq->lock);
	}
}
static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp)
{
	return to_mpd(qp->ibqp.pd);
}
static void get_cqs(struct mlx5_ib_qp *qp,
		    struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
{
	switch (qp->ibqp.qp_type) {
	case IB_QPT_XRC_TGT:
		*send_cq = NULL;
		*recv_cq = NULL;
		break;
	case MLX5_IB_QPT_REG_UMR:
	case IB_QPT_XRC_INI:
		*send_cq = to_mcq(qp->ibqp.send_cq);
		*recv_cq = NULL;
		break;

	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	case IB_QPT_RAW_IPV6:
	case IB_QPT_RAW_ETHERTYPE:
		*send_cq = to_mcq(qp->ibqp.send_cq);
		*recv_cq = to_mcq(qp->ibqp.recv_cq);
		break;

	case IB_QPT_RAW_PACKET:
	default:
		*send_cq = NULL;
		*recv_cq = NULL;
		break;
	}
}
static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_cq *send_cq, *recv_cq;
	struct mlx5_modify_qp_mbox_in *in;
	int err;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return;

	if (qp->state != IB_QPS_RESET)
		if (mlx5_core_qp_modify(dev->mdev, to_mlx5_state(qp->state),
					MLX5_QP_STATE_RST, in, sizeof(*in), &qp->mqp))
			mlx5_ib_warn(dev, "mlx5_ib: modify QP %06x to RESET failed\n",
				     qp->mqp.qpn);

	get_cqs(qp, &send_cq, &recv_cq);

	if (qp->create_type == MLX5_QP_KERNEL) {
		mlx5_ib_lock_cqs(send_cq, recv_cq);
		__mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
				   qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (send_cq != recv_cq)
			__mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
		mlx5_ib_unlock_cqs(send_cq, recv_cq);
	}

	err = mlx5_core_destroy_qp(dev->mdev, &qp->mqp);
	if (err)
		mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->mqp.qpn);
	kfree(in);

	if (qp->create_type == MLX5_QP_KERNEL)
		destroy_qp_kernel(dev, qp);
	else if (qp->create_type == MLX5_QP_USER)
		destroy_qp_user(&get_pd(qp)->ibpd, qp);
}
static const char *ib_qp_type_str(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_SMI:
		return "IB_QPT_SMI";
	case IB_QPT_GSI:
		return "IB_QPT_GSI";
	case IB_QPT_RC:
		return "IB_QPT_RC";
	case IB_QPT_UC:
		return "IB_QPT_UC";
	case IB_QPT_UD:
		return "IB_QPT_UD";
	case IB_QPT_RAW_IPV6:
		return "IB_QPT_RAW_IPV6";
	case IB_QPT_RAW_ETHERTYPE:
		return "IB_QPT_RAW_ETHERTYPE";
	case IB_QPT_XRC_INI:
		return "IB_QPT_XRC_INI";
	case IB_QPT_XRC_TGT:
		return "IB_QPT_XRC_TGT";
	case IB_QPT_RAW_PACKET:
		return "IB_QPT_RAW_PACKET";
	case MLX5_IB_QPT_REG_UMR:
		return "MLX5_IB_QPT_REG_UMR";
	default:
		return "Invalid QP type";
	}
}
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata)
{
	struct mlx5_general_caps *gen;
	struct mlx5_ib_dev *dev;
	struct mlx5_ib_qp *qp;
	u16 xrcdn = 0;
	int err;

	if (pd) {
		dev = to_mdev(pd->device);
	} else {
		/* being cautious here */
		if (init_attr->qp_type != IB_QPT_XRC_TGT &&
		    init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
			pr_warn("%s: no PD for transport %s\n", __func__,
				ib_qp_type_str(init_attr->qp_type));
			return ERR_PTR(-EINVAL);
		}
		dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
	}
	gen = &dev->mdev->caps.gen;

	switch (init_attr->qp_type) {
	case IB_QPT_XRC_TGT:
	case IB_QPT_XRC_INI:
		if (!(gen->flags & MLX5_DEV_CAP_FLAG_XRC)) {
			mlx5_ib_dbg(dev, "XRC not supported\n");
			return ERR_PTR(-ENOSYS);
		}
		init_attr->recv_cq = NULL;
		if (init_attr->qp_type == IB_QPT_XRC_TGT) {
			xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
			init_attr->send_cq = NULL;
		}

		/* fall through */
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case MLX5_IB_QPT_REG_UMR:
		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);

		err = create_qp_common(dev, pd, init_attr, udata, qp);
		if (err) {
			mlx5_ib_dbg(dev, "create_qp_common failed\n");
			kfree(qp);
			return ERR_PTR(err);
		}

		if (is_qp0(init_attr->qp_type))
			qp->ibqp.qp_num = 0;
		else if (is_qp1(init_attr->qp_type))
			qp->ibqp.qp_num = 1;
		else
			qp->ibqp.qp_num = qp->mqp.qpn;

		mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
			    qp->ibqp.qp_num, qp->mqp.qpn, to_mcq(init_attr->recv_cq)->mcq.cqn,
			    to_mcq(init_attr->send_cq)->mcq.cqn);

		qp->xrcdn = xrcdn;

		break;

	case IB_QPT_RAW_IPV6:
	case IB_QPT_RAW_ETHERTYPE:
	case IB_QPT_RAW_PACKET:
	default:
		mlx5_ib_dbg(dev, "unsupported qp type %d\n",
			    init_attr->qp_type);
		/* Don't support raw QPs */
		return ERR_PTR(-EINVAL);
	}

	return &qp->ibqp;
}
int mlx5_ib_destroy_qp(struct ib_qp *qp)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_ib_qp *mqp = to_mqp(qp);

	destroy_qp_common(dev, mqp);

	kfree(mqp);

	return 0;
}
static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr,
				   int attr_mask)
{
	u32 hw_access_flags = 0;
	u8 dest_rd_atomic;
	u32 access_flags;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MLX5_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= (MLX5_QP_BIT_RAE | MLX5_ATOMIC_MODE_CX);
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MLX5_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}
enum {
	MLX5_PATH_FLAG_FL	= 1 << 0,
	MLX5_PATH_FLAG_FREE_AR	= 1 << 1,
	MLX5_PATH_FLAG_COUNTER	= 1 << 2,
};
static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
{
	struct mlx5_general_caps *gen;

	gen = &dev->mdev->caps.gen;
	if (rate == IB_RATE_PORT_CURRENT) {
		return 0;
	} else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
		return -EINVAL;
	} else {
		while (rate != IB_RATE_2_5_GBPS &&
		       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
			 gen->stat_rate_support))
			--rate;
	}

	return rate + MLX5_STAT_RATE_OFFSET;
}
static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
			 struct mlx5_qp_path *path, u8 port, int attr_mask,
			 u32 path_flags, const struct ib_qp_attr *attr)
{
	struct mlx5_general_caps *gen;
	int err;

	gen = &dev->mdev->caps.gen;
	path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
	path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : 0;

	if (attr_mask & IB_QP_PKEY_INDEX)
		path->pkey_index = attr->pkey_index;

	path->grh_mlid	= ah->src_path_bits & 0x7f;
	path->rlid	= cpu_to_be16(ah->dlid);

	if (ah->ah_flags & IB_AH_GRH) {
		if (ah->grh.sgid_index >= gen->port[port - 1].gid_table_len) {
			pr_err("sgid_index (%u) too large. max is %d\n",
			       ah->grh.sgid_index, gen->port[port - 1].gid_table_len);
			return -EINVAL;
		}
		path->grh_mlid |= 1 << 7;
		path->mgid_index = ah->grh.sgid_index;
		path->hop_limit  = ah->grh.hop_limit;
		path->tclass_flowlabel =
			cpu_to_be32((ah->grh.traffic_class << 20) |
				    (ah->grh.flow_label));
		memcpy(path->rgid, ah->grh.dgid.raw, 16);
	}

	err = ib_rate_to_mlx5(dev, ah->static_rate);
	if (err < 0)
		return err;
	path->static_rate = err;
	path->port = port;

	if (attr_mask & IB_QP_TIMEOUT)
		path->ackto_lt = attr->timeout << 3;

	path->sl = ah->sl & 0xf;

	return 0;
}
static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = {
	[MLX5_QP_STATE_INIT] = {
		[MLX5_QP_STATE_INIT] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_PRI_PORT,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_PRI_PORT,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_Q_KEY		|
					  MLX5_QP_OPTPAR_PRI_PORT,
		},
		[MLX5_QP_STATE_RTR] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX	|
					   MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					   MLX5_QP_OPTPAR_RRE		|
					   MLX5_QP_OPTPAR_RAE		|
					   MLX5_QP_OPTPAR_RWE		|
					   MLX5_QP_OPTPAR_PKEY_INDEX,
		},
	},
	[MLX5_QP_STATE_RTR] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_RNR_TIMEOUT,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
		},
	},
	[MLX5_QP_STATE_RTS] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_RNR_TIMEOUT	|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY		|
					  MLX5_QP_OPTPAR_SRQN		|
					  MLX5_QP_OPTPAR_CQN_RCV,
		},
	},
	[MLX5_QP_STATE_SQER] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_UD]	 = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_UC]	 = MLX5_QP_OPTPAR_RWE,
			[MLX5_QP_ST_RC]	 = MLX5_QP_OPTPAR_RNR_TIMEOUT	|
					   MLX5_QP_OPTPAR_RWE		|
					   MLX5_QP_OPTPAR_RAE,
		},
	},
};
static int ib_nr_to_mlx5_nr(int ib_mask)
{
	switch (ib_mask) {
	case IB_QP_STATE:
		return 0;
	case IB_QP_CUR_STATE:
		return 0;
	case IB_QP_EN_SQD_ASYNC_NOTIFY:
		return 0;
	case IB_QP_ACCESS_FLAGS:
		return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE |
			MLX5_QP_OPTPAR_RAE;
	case IB_QP_PKEY_INDEX:
		return MLX5_QP_OPTPAR_PKEY_INDEX;
	case IB_QP_PORT:
		return MLX5_QP_OPTPAR_PRI_PORT;
	case IB_QP_QKEY:
		return MLX5_QP_OPTPAR_Q_KEY;
	case IB_QP_AV:
		return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH |
			MLX5_QP_OPTPAR_PRI_PORT;
	case IB_QP_PATH_MTU:
		return 0;
	case IB_QP_TIMEOUT:
		return MLX5_QP_OPTPAR_ACK_TIMEOUT;
	case IB_QP_RETRY_CNT:
		return MLX5_QP_OPTPAR_RETRY_COUNT;
	case IB_QP_RNR_RETRY:
		return MLX5_QP_OPTPAR_RNR_RETRY;
	case IB_QP_RQ_PSN:
		return 0;
	case IB_QP_MAX_QP_RD_ATOMIC:
		return MLX5_QP_OPTPAR_SRA_MAX;
	case IB_QP_ALT_PATH:
		return MLX5_QP_OPTPAR_ALT_ADDR_PATH;
	case IB_QP_MIN_RNR_TIMER:
		return MLX5_QP_OPTPAR_RNR_TIMEOUT;
	case IB_QP_SQ_PSN:
		return 0;
	case IB_QP_MAX_DEST_RD_ATOMIC:
		return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE |
			MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE;
	case IB_QP_PATH_MIG_STATE:
		return MLX5_QP_OPTPAR_PM_STATE;
	case IB_QP_CAP:
		return 0;
	case IB_QP_DEST_QPN:
		return 0;
	}
	return 0;
}
static int ib_mask_to_mlx5_opt(int ib_mask)
{
	int result = 0;
	int i;

	for (i = 0; i < 8 * sizeof(int); i++) {
		if ((1 << i) & ib_mask)
			result |= ib_nr_to_mlx5_nr(1 << i);
	}

	return result;
}
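/*
 * Core of the modify-QP flow: translate the IB attributes and path
 * information into a mlx5_qp_context, mask the optional parameters
 * through opt_mask[], and post the state transition to firmware.  On a
 * kernel QP transition to RESET, stale CQEs are flushed and the queue
 * indices and doorbells are re-zeroed.
 */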
static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
			       const struct ib_qp_attr *attr, int attr_mask,
			       enum ib_qp_state cur_state, enum ib_qp_state new_state)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_ib_cq *send_cq, *recv_cq;
	struct mlx5_qp_context *context;
	struct mlx5_general_caps *gen;
	struct mlx5_modify_qp_mbox_in *in;
	struct mlx5_ib_pd *pd;
	enum mlx5_qp_state mlx5_cur, mlx5_new;
	enum mlx5_qp_optpar optpar;
	int sqd_event;
	int mlx5_st;
	int err;

	gen = &dev->mdev->caps.gen;
	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	context = &in->ctx;
	err = to_mlx5_st(ibqp->qp_type);
	if (err < 0)
		goto out;

	context->flags = cpu_to_be32(err << 16);

	if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
		context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
	} else {
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			context->flags |= cpu_to_be32(MLX5_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			context->flags |= cpu_to_be32(MLX5_QP_PM_ARMED << 11);
			break;
		}
	}

	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
		context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
	} else if (ibqp->qp_type == IB_QPT_UD ||
		   ibqp->qp_type == MLX5_IB_QPT_REG_UMR) {
		context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
	} else if (attr_mask & IB_QP_PATH_MTU) {
		if (attr->path_mtu < IB_MTU_256 ||
		    attr->path_mtu > IB_MTU_4096) {
			mlx5_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu);
			err = -EINVAL;
			goto out;
		}
		context->mtu_msgmax = (attr->path_mtu << 5) | gen->log_max_msg;
	}

	if (attr_mask & IB_QP_DEST_QPN)
		context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);

	if (attr_mask & IB_QP_PKEY_INDEX)
		context->pri_path.pkey_index = attr->pkey_index;

	/* todo implement counter_index functionality */

	if (is_sqp(ibqp->qp_type))
		context->pri_path.port = qp->port;

	if (attr_mask & IB_QP_PORT)
		context->pri_path.port = attr->port_num;

	if (attr_mask & IB_QP_AV) {
		err = mlx5_set_path(dev, &attr->ah_attr, &context->pri_path,
				    attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
				    attr_mask, 0, attr);
		if (err)
			goto out;
	}

	if (attr_mask & IB_QP_TIMEOUT)
		context->pri_path.ackto_lt |= attr->timeout << 3;

	if (attr_mask & IB_QP_ALT_PATH) {
		err = mlx5_set_path(dev, &attr->alt_ah_attr, &context->alt_path,
				    attr->alt_port_num, attr_mask, 0, attr);
		if (err)
			goto out;
	}

	pd = get_pd(qp);
	get_cqs(qp, &send_cq, &recv_cq);

	context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
	context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0;
	context->cqn_recv = recv_cq ? cpu_to_be32(recv_cq->mcq.cqn) : 0;
	context->params1  = cpu_to_be32(MLX5_IB_ACK_REQ_FREQ << 28);

	if (attr_mask & IB_QP_RNR_RETRY)
		context->params1 |= cpu_to_be32(attr->rnr_retry << 13);

	if (attr_mask & IB_QP_RETRY_CNT)
		context->params1 |= cpu_to_be32(attr->retry_cnt << 16);

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic)
			context->params1 |=
				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
	}

	if (attr_mask & IB_QP_SQ_PSN)
		context->next_send_psn = cpu_to_be32(attr->sq_psn);

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic)
			context->params2 |=
				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
		context->params2 |= to_mlx5_access_flags(qp, attr, attr_mask);

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);

	if (attr_mask & IB_QP_RQ_PSN)
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	if (attr_mask & IB_QP_QKEY)
		context->qkey = cpu_to_be32(attr->qkey);

	if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->db_rec_addr = cpu_to_be64(qp->db.dma);

	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
		sqd_event = 1;
	else
		sqd_event = 0;

	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->sq_crq_size |= cpu_to_be16(1 << 4);


	mlx5_cur = to_mlx5_state(cur_state);
	mlx5_new = to_mlx5_state(new_state);
	mlx5_st = to_mlx5_st(ibqp->qp_type);
	if (mlx5_st < 0)
		goto out;

	optpar = ib_mask_to_mlx5_opt(attr_mask);
	optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
	in->optparam = cpu_to_be32(optpar);
	err = mlx5_core_qp_modify(dev->mdev, to_mlx5_state(cur_state),
				  to_mlx5_state(new_state), in, sqd_event,
				  &qp->mqp);
	if (err)
		goto out;

	qp->state = new_state;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->atomic_rd_en = attr->qp_access_flags;
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT)
		qp->port = attr->port_num;
	if (attr_mask & IB_QP_ALT_PATH)
		qp->alt_port = attr->alt_port_num;

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (new_state == IB_QPS_RESET && !ibqp->uobject) {
		mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
				 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
		if (send_cq != recv_cq)
			mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);

		qp->rq.head = 0;
		qp->rq.tail = 0;
		qp->sq.head = 0;
		qp->sq.tail = 0;
		qp->sq.cur_post = 0;
		qp->sq.last_poll = 0;
		qp->db.db[MLX5_RCV_DBR] = 0;
		qp->db.db[MLX5_SND_DBR] = 0;
	}

out:
	kfree(in);
	return err;
}
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct mlx5_general_caps *gen;
	int err = -EINVAL;
	int port;

	gen = &dev->mdev->caps.gen;
	mutex_lock(&qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (ibqp->qp_type != MLX5_IB_QPT_REG_UMR &&
	    !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
				IB_LINK_LAYER_UNSPECIFIED))
		goto out;

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > gen->num_ports))
		goto out;

	if (attr_mask & IB_QP_PKEY_INDEX) {
		port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
		if (attr->pkey_index >= gen->port[port - 1].pkey_table_len)
			goto out;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > (1 << gen->log_max_ra_res_qp))
		goto out;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > (1 << gen->log_max_ra_req_qp))
		goto out;

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		err = 0;
		goto out;
	}

	err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);

out:
	mutex_unlock(&qp->mutex);
	return err;
}
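/*
 * Ring-overflow check for a work queue: the common case is decided
 * without a lock from the cached head/tail; only when the queue looks
 * full is the owning CQ lock taken to re-read a stable tail.
 */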
static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
{
	struct mlx5_ib_cq *cq;
	unsigned cur;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max_post))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max_post;
}
static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->raddr    = cpu_to_be64(remote_addr);
	rseg->rkey     = cpu_to_be32(rkey);
	rseg->reserved = 0;
}
static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
			     struct ib_send_wr *wr)
{
	memcpy(&dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof(struct mlx5_av));
	dseg->av.dqp_dct = cpu_to_be32(wr->wr.ud.remote_qpn | MLX5_EXTENDED_UD_AV);
	dseg->av.key.qkey.qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
}
static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->byte_count = cpu_to_be32(sg->length);
	dseg->lkey       = cpu_to_be32(sg->lkey);
	dseg->addr       = cpu_to_be64(sg->addr);
}
static __be16 get_klm_octo(int npages)
{
	return cpu_to_be16(ALIGN(npages, 8) / 2);
}
static __be64 frwr_mkey_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_LEN		|
		MLX5_MKEY_MASK_PAGE_SIZE	|
		MLX5_MKEY_MASK_START_ADDR	|
		MLX5_MKEY_MASK_EN_RINVAL	|
		MLX5_MKEY_MASK_KEY		|
		MLX5_MKEY_MASK_LR		|
		MLX5_MKEY_MASK_LW		|
		MLX5_MKEY_MASK_RR		|
		MLX5_MKEY_MASK_RW		|
		MLX5_MKEY_MASK_A		|
		MLX5_MKEY_MASK_SMALL_FENCE	|
		MLX5_MKEY_MASK_FREE;

	return cpu_to_be64(result);
}

static __be64 sig_mkey_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_LEN		|
		MLX5_MKEY_MASK_PAGE_SIZE	|
		MLX5_MKEY_MASK_START_ADDR	|
		MLX5_MKEY_MASK_EN_SIGERR	|
		MLX5_MKEY_MASK_EN_RINVAL	|
		MLX5_MKEY_MASK_KEY		|
		MLX5_MKEY_MASK_LR		|
		MLX5_MKEY_MASK_LW		|
		MLX5_MKEY_MASK_RR		|
		MLX5_MKEY_MASK_RW		|
		MLX5_MKEY_MASK_SMALL_FENCE	|
		MLX5_MKEY_MASK_FREE		|
		MLX5_MKEY_MASK_BSF_EN;

	return cpu_to_be64(result);
}
static void set_frwr_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
				 struct ib_send_wr *wr, int li)
{
	memset(umr, 0, sizeof(*umr));

	if (li) {
		umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
		umr->flags = 1 << 7;
		return;
	}

	umr->flags = (1 << 5); /* fail if not free */
	umr->klm_octowords = get_klm_octo(wr->wr.fast_reg.page_list_len);
	umr->mkey_mask = frwr_mkey_mask();
}
static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
				struct ib_send_wr *wr)
{
	struct umr_wr *umrwr = (struct umr_wr *)&wr->wr.fast_reg;
	u64 mask;

	memset(umr, 0, sizeof(*umr));

	if (!(wr->send_flags & MLX5_IB_SEND_UMR_UNREG)) {
		umr->flags = 1 << 5; /* fail if not free */
		umr->klm_octowords = get_klm_octo(umrwr->npages);
		mask =  MLX5_MKEY_MASK_LEN		|
			MLX5_MKEY_MASK_PAGE_SIZE	|
			MLX5_MKEY_MASK_START_ADDR	|
			MLX5_MKEY_MASK_PD		|
			MLX5_MKEY_MASK_LR		|
			MLX5_MKEY_MASK_LW		|
			MLX5_MKEY_MASK_KEY		|
			MLX5_MKEY_MASK_RR		|
			MLX5_MKEY_MASK_RW		|
			MLX5_MKEY_MASK_A		|
			MLX5_MKEY_MASK_FREE;
		umr->mkey_mask = cpu_to_be64(mask);
	} else {
		umr->flags = 2 << 5; /* fail if free */
		mask = MLX5_MKEY_MASK_FREE;
		umr->mkey_mask = cpu_to_be64(mask);
	}

	if (!wr->num_sge)
		umr->flags |= (1 << 7); /* inline */
}
static u8 get_umr_flags(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
		MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN;
}
static void set_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr,
			     int li, int *writ)
{
	memset(seg, 0, sizeof(*seg));
	if (li) {
		seg->status = 1 << 6;
		return;
	}

	seg->flags = get_umr_flags(wr->wr.fast_reg.access_flags) |
		     MLX5_ACCESS_MODE_MTT;
	*writ = seg->flags & (MLX5_PERM_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE);
	seg->qpn_mkey7_0 = cpu_to_be32((wr->wr.fast_reg.rkey & 0xff) | 0xffffff00);
	seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
	seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
	seg->len = cpu_to_be64(wr->wr.fast_reg.length);
	seg->xlt_oct_size = cpu_to_be32((wr->wr.fast_reg.page_list_len + 1) / 2);
	seg->log2_page_size = wr->wr.fast_reg.page_shift;
}
static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr)
{
	memset(seg, 0, sizeof(*seg));
	if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) {
		seg->status = 1 << 6;
		return;
	}

	seg->flags = convert_access(wr->wr.fast_reg.access_flags);
	seg->flags_pd = cpu_to_be32(to_mpd((struct ib_pd *)wr->wr.fast_reg.page_list)->pdn);
	seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
	seg->len = cpu_to_be64(wr->wr.fast_reg.length);
	seg->log2_page_size = wr->wr.fast_reg.page_shift;
	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
				       mlx5_mkey_variant(wr->wr.fast_reg.rkey));
}
static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg,
			   struct ib_send_wr *wr,
			   struct mlx5_core_dev *mdev,
			   struct mlx5_ib_pd *pd,
			   int writ)
{
	struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list);
	u64 *page_list = wr->wr.fast_reg.page_list->page_list;
	u64 perm = MLX5_EN_RD | (writ ? MLX5_EN_WR : 0);
	int i;

	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++)
		mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm);
	dseg->addr = cpu_to_be64(mfrpl->map);
	dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64));
	dseg->lkey = cpu_to_be32(pd->pa_lkey);
}
static __be32 send_ieth(struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return wr->ex.imm_data;

	case IB_WR_SEND_WITH_INV:
		return cpu_to_be32(wr->ex.invalidate_rkey);

	default:
		return 0;
	}
}
static u8 calc_sig(void *wqe, int size)
{
	u8 *p = wqe;
	u8 res = 0;
	int i;

	for (i = 0; i < size; i++)
		res ^= p[i];

	return ~res;
}

static u8 wq_sig(void *wqe)
{
	return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
}
static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr,
			    void *wqe, int *sz)
{
	struct mlx5_wqe_inline_seg *seg;
	void *qend = qp->sq.qend;
	void *addr;
	int inl = 0;
	int copy;
	int len;
	int i;

	seg = wqe;
	wqe += sizeof(*seg);
	for (i = 0; i < wr->num_sge; i++) {
		addr = (void *)(unsigned long)(wr->sg_list[i].addr);
		len  = wr->sg_list[i].length;
		inl += len;

		if (unlikely(inl > qp->max_inline_data))
			return -ENOMEM;

		if (unlikely(wqe + len > qend)) {
			copy = qend - wqe;
			memcpy(wqe, addr, copy);
			addr += copy;
			len -= copy;
			wqe = mlx5_get_send_wqe(qp, 0);
		}
		memcpy(wqe, addr, len);
		wqe += len;
	}

	seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);

	*sz = ALIGN(inl + sizeof(seg->byte_count), 16) / 16;

	return 0;
}
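/*
 * Signature offload helpers.  bs_selector() maps a logical block size
 * to the hardware selector encoding, and the BSF (byte stream format)
 * builders below describe how data and T10-DIF protection information
 * are laid out in the memory and wire domains.
 */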
static u16 prot_field_size(enum ib_signature_type type)
{
	switch (type) {
	case IB_SIG_TYPE_T10_DIF:
		return MLX5_DIF_SIZE;
	default:
		return 0;
	}
}

static u8 bs_selector(int block_size)
{
	switch (block_size) {
	case 512:	    return 0x1;
	case 520:	    return 0x2;
	case 4096:	    return 0x3;
	case 4160:	    return 0x4;
	case 1073741824:    return 0x5;
	default:	    return 0;
	}
}
static void mlx5_fill_inl_bsf(struct ib_sig_domain *domain,
			      struct mlx5_bsf_inl *inl)
{
	/* Valid inline section and allow BSF refresh */
	inl->vld_refresh = cpu_to_be16(MLX5_BSF_INL_VALID |
				       MLX5_BSF_REFRESH_DIF);
	inl->dif_apptag = cpu_to_be16(domain->sig.dif.app_tag);
	inl->dif_reftag = cpu_to_be32(domain->sig.dif.ref_tag);
	/* repeating block */
	inl->rp_inv_seed = MLX5_BSF_REPEAT_BLOCK;
	inl->sig_type = domain->sig.dif.bg_type == IB_T10DIF_CRC ?
			MLX5_DIF_CRC : MLX5_DIF_IPCS;

	if (domain->sig.dif.ref_remap)
		inl->dif_inc_ref_guard_check |= MLX5_BSF_INC_REFTAG;

	if (domain->sig.dif.app_escape) {
		if (domain->sig.dif.ref_escape)
			inl->dif_inc_ref_guard_check |= MLX5_BSF_APPREF_ESCAPE;
		else
			inl->dif_inc_ref_guard_check |= MLX5_BSF_APPTAG_ESCAPE;
	}

	inl->dif_app_bitmask_check =
		cpu_to_be16(domain->sig.dif.apptag_check_mask);
}
static int mlx5_set_bsf(struct ib_mr *sig_mr,
			struct ib_sig_attrs *sig_attrs,
			struct mlx5_bsf *bsf, u32 data_size)
{
	struct mlx5_core_sig_ctx *msig = to_mmr(sig_mr)->sig;
	struct mlx5_bsf_basic *basic = &bsf->basic;
	struct ib_sig_domain *mem = &sig_attrs->mem;
	struct ib_sig_domain *wire = &sig_attrs->wire;

	memset(bsf, 0, sizeof(*bsf));

	/* Basic + Extended + Inline */
	basic->bsf_size_sbs = 1 << 7;
	/* Input domain check byte mask */
	basic->check_byte_mask = sig_attrs->check_mask;
	basic->raw_data_size = cpu_to_be32(data_size);

	/* Memory domain */
	switch (sig_attrs->mem.sig_type) {
	case IB_SIG_TYPE_NONE:
		break;
	case IB_SIG_TYPE_T10_DIF:
		basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval);
		basic->m_bfs_psv = cpu_to_be32(msig->psv_memory.psv_idx);
		mlx5_fill_inl_bsf(mem, &bsf->m_inl);
		break;
	default:
		return -EINVAL;
	}

	/* Wire domain */
	switch (sig_attrs->wire.sig_type) {
	case IB_SIG_TYPE_NONE:
		break;
	case IB_SIG_TYPE_T10_DIF:
		if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval &&
		    mem->sig_type == wire->sig_type) {
			/* Same block structure */
			basic->bsf_size_sbs |= 1 << 4;
			if (mem->sig.dif.bg_type == wire->sig.dif.bg_type)
				basic->wire.copy_byte_mask |= MLX5_CPY_GRD_MASK;
			if (mem->sig.dif.app_tag == wire->sig.dif.app_tag)
				basic->wire.copy_byte_mask |= MLX5_CPY_APP_MASK;
			if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag)
				basic->wire.copy_byte_mask |= MLX5_CPY_REF_MASK;
		} else
			basic->wire.bs_selector = bs_selector(wire->sig.dif.pi_interval);

		basic->w_bfs_psv = cpu_to_be32(msig->psv_wire.psv_idx);
		mlx5_fill_inl_bsf(wire, &bsf->w_inl);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int set_sig_data_segment(struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
				void **seg, int *size)
{
	struct ib_sig_attrs *sig_attrs = wr->wr.sig_handover.sig_attrs;
	struct ib_mr *sig_mr = wr->wr.sig_handover.sig_mr;
	struct mlx5_bsf *bsf;
	u32 data_len = wr->sg_list->length;
	u32 data_key = wr->sg_list->lkey;
	u64 data_va = wr->sg_list->addr;
	int ret;
	int wqe_size;

	if (!wr->wr.sig_handover.prot ||
	    (data_key == wr->wr.sig_handover.prot->lkey &&
	     data_va == wr->wr.sig_handover.prot->addr &&
	     data_len == wr->wr.sig_handover.prot->length)) {
		/**
		 * Source domain doesn't contain signature information
		 * or data and protection are interleaved in memory.
		 * So need construct:
		 *                  ------------------
		 *                 |     data_klm     |
		 *                  ------------------
		 *                 |       BSF        |
		 *                  ------------------
		 **/
		struct mlx5_klm *data_klm = *seg;

		data_klm->bcount = cpu_to_be32(data_len);
		data_klm->key = cpu_to_be32(data_key);
		data_klm->va = cpu_to_be64(data_va);
		wqe_size = ALIGN(sizeof(*data_klm), 64);
	} else {
		/**
		 * Source domain contains signature information
		 * So need construct a strided block format:
		 *               ---------------------------
		 *              |     stride_block_ctrl     |
		 *               ---------------------------
		 *              |          data_klm         |
		 *               ---------------------------
		 *              |          prot_klm         |
		 *               ---------------------------
		 *              |             BSF           |
		 *               ---------------------------
		 **/
		struct mlx5_stride_block_ctrl_seg *sblock_ctrl;
		struct mlx5_stride_block_entry *data_sentry;
		struct mlx5_stride_block_entry *prot_sentry;
		u32 prot_key = wr->wr.sig_handover.prot->lkey;
		u64 prot_va = wr->wr.sig_handover.prot->addr;
		u16 block_size = sig_attrs->mem.sig.dif.pi_interval;
		int prot_size;

		sblock_ctrl = *seg;
		data_sentry = (void *)sblock_ctrl + sizeof(*sblock_ctrl);
		prot_sentry = (void *)data_sentry + sizeof(*data_sentry);

		prot_size = prot_field_size(sig_attrs->mem.sig_type);
		if (!prot_size) {
			pr_err("Bad block size given: %u\n", block_size);
			return -EINVAL;
		}
		sblock_ctrl->bcount_per_cycle = cpu_to_be32(block_size +
							    prot_size);
		sblock_ctrl->op = cpu_to_be32(MLX5_STRIDE_BLOCK_OP);
		sblock_ctrl->repeat_count = cpu_to_be32(data_len / block_size);
		sblock_ctrl->num_entries = cpu_to_be16(2);

		data_sentry->bcount = cpu_to_be16(block_size);
		data_sentry->key = cpu_to_be32(data_key);
		data_sentry->va = cpu_to_be64(data_va);
		data_sentry->stride = cpu_to_be16(block_size);

		prot_sentry->bcount = cpu_to_be16(prot_size);
		prot_sentry->key = cpu_to_be32(prot_key);
		prot_sentry->va = cpu_to_be64(prot_va);
		prot_sentry->stride = cpu_to_be16(prot_size);

		wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) +
				 sizeof(*prot_sentry), 64);
	}

	*seg += wqe_size;
	*size += wqe_size / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	bsf = *seg;
	ret = mlx5_set_bsf(sig_mr, sig_attrs, bsf, data_len);
	if (ret)
		return -EINVAL;

	*seg += sizeof(*bsf);
	*size += sizeof(*bsf) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	return 0;
}
static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
				 struct ib_send_wr *wr, u32 nelements,
				 u32 length, u32 pdn)
{
	struct ib_mr *sig_mr = wr->wr.sig_handover.sig_mr;
	u32 sig_key = sig_mr->rkey;
	u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1;

	memset(seg, 0, sizeof(*seg));

	seg->flags = get_umr_flags(wr->wr.sig_handover.access_flags) |
				   MLX5_ACCESS_MODE_KLM;
	seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
	seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
				    MLX5_MKEY_BSF_EN | pdn);
	seg->len = cpu_to_be64(length);
	seg->xlt_oct_size = cpu_to_be32(be16_to_cpu(get_klm_octo(nelements)));
	seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
}
static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
				struct ib_send_wr *wr, u32 nelements)
{
	memset(umr, 0, sizeof(*umr));

	umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE;
	umr->klm_octowords = get_klm_octo(nelements);
	umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE);
	umr->mkey_mask = sig_mkey_mask();
}
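/*
 * Assemble a complete SIG_UMR work request: UMR control segment, then
 * the signature mkey segment, then the data/protection layout built by
 * set_sig_data_segment(), wrapping to the start of the queue whenever
 * a segment ends exactly at qp->sq.qend.
 */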
static int set_sig_umr_wr(struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
			  void **seg, int *size)
{
	struct mlx5_ib_mr *sig_mr = to_mmr(wr->wr.sig_handover.sig_mr);
	u32 pdn = get_pd(qp)->pdn;
	u32 klm_oct_size;
	int region_len, ret;

	if (unlikely(wr->num_sge != 1) ||
	    unlikely(wr->wr.sig_handover.access_flags &
		     IB_ACCESS_REMOTE_ATOMIC) ||
	    unlikely(!sig_mr->sig) || unlikely(!qp->signature_en) ||
	    unlikely(!sig_mr->sig->sig_status_checked))
		return -EINVAL;

	/* length of the protected region, data + protection */
	region_len = wr->sg_list->length;
	if (wr->wr.sig_handover.prot &&
	    (wr->wr.sig_handover.prot->lkey != wr->sg_list->lkey  ||
	     wr->wr.sig_handover.prot->addr != wr->sg_list->addr  ||
	     wr->wr.sig_handover.prot->length != wr->sg_list->length))
		region_len += wr->wr.sig_handover.prot->length;

	/**
	 * KLM octoword size - if protection was provided
	 * then we use strided block format (3 octowords),
	 * else we use single KLM (1 octoword)
	 **/
	klm_oct_size = wr->wr.sig_handover.prot ? 3 : 1;

	set_sig_umr_segment(*seg, wr, klm_oct_size);
	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	set_sig_mkey_segment(*seg, wr, klm_oct_size, region_len, pdn);
	*seg += sizeof(struct mlx5_mkey_seg);
	*size += sizeof(struct mlx5_mkey_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	ret = set_sig_data_segment(wr, qp, seg, size);
	if (ret)
		return ret;

	sig_mr->sig->sig_status_checked = false;
	return 0;
}
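
/*
 * Build a SET_PSV segment.  The PSV (protection signature value)
 * object holds the running signature state for one domain (memory or
 * wire); this seeds it with the domain's initial block guard,
 * application and reference tags so the HCA can check and generate
 * DIF as data flows through the signature MR.
 */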
static int set_psv_wr(struct ib_sig_domain *domain,
		      u32 psv_idx, void **seg, int *size)
{
	struct mlx5_seg_set_psv *psv_seg = *seg;

	memset(psv_seg, 0, sizeof(*psv_seg));
	psv_seg->psv_num = cpu_to_be32(psv_idx);
	switch (domain->sig_type) {
	case IB_SIG_TYPE_NONE:
		break;
	case IB_SIG_TYPE_T10_DIF:
		psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 |
						     domain->sig.dif.app_tag);
		psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag);
		break;
	default:
		pr_err("Bad signature type given.\n");
		return 1;
	}

	*seg += sizeof(*psv_seg);
	*size += sizeof(*psv_seg) / 16;

	return 0;
}
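
/*
 * Build a fast-register or local-invalidate WQE.  Both are expressed
 * as a UMR control segment plus an mkey context segment; only fast
 * registration appends the data segment pointing at the translated
 * page list, and IB_SEND_INLINE is rejected for these opcodes.
 */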
static int set_frwr_li_wr(void **seg, struct ib_send_wr *wr, int *size,
			  struct mlx5_core_dev *mdev, struct mlx5_ib_pd *pd, struct mlx5_ib_qp *qp)
{
	int writ = 0;
	int li;

	li = wr->opcode == IB_WR_LOCAL_INV ? 1 : 0;
	if (unlikely(wr->send_flags & IB_SEND_INLINE))
		return -EINVAL;

	set_frwr_umr_segment(*seg, wr, li);
	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);
	set_mkey_segment(*seg, wr, li, &writ);
	*seg += sizeof(struct mlx5_mkey_seg);
	*size += sizeof(struct mlx5_mkey_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	if (!li) {
		if (unlikely(wr->wr.fast_reg.page_list_len >
			     wr->wr.fast_reg.page_list->max_page_list_len))
			return -ENOMEM;

		set_frwr_pages(*seg, wr, mdev, pd, writ);
		*seg += sizeof(struct mlx5_wqe_data_seg);
		*size += (sizeof(struct mlx5_wqe_data_seg) / 16);
	}
	return 0;
}
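
/* Debug helper: dump a WQE, 16 bytes per line, following the send
 * queue wrap-around one 64-byte unit at a time.
 */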
static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
{
	__be32 *p = NULL;
	int tidx = idx;
	int i, j;

	pr_debug("dump wqe at %p\n", mlx5_get_send_wqe(qp, tidx));
	for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
		if ((i & 0xf) == 0) {
			void *buf = mlx5_get_send_wqe(qp, tidx);
			tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1);
			p = buf;
			j = 0;
		}
		pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
			 be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
			 be32_to_cpu(p[j + 3]));
	}
}
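
/*
 * BlueFlame: copy the WQE itself into the doorbell page so the HCA
 * receives the descriptor along with the doorbell and can skip the DMA
 * fetch of the send queue.  Each loop iteration below moves 64 bytes
 * (one send WQE basic block) over MMIO, mirrored by bytecnt -= 64, and
 * the source wraps at qp->sq.qend like any other WQE walk.
 */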
static void mlx5_bf_copy(u64 __iomem *dst, u64 *src,
			 unsigned bytecnt, struct mlx5_ib_qp *qp)
{
	while (bytecnt > 0) {
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		bytecnt -= 64;
		if (unlikely(src == qp->sq.qend))
			src = mlx5_get_send_wqe(qp, 0);
	}
}
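
/*
 * Pick the fence mode for a WQE: a fenced local invalidate needs
 * strong ordering against all outstanding operations; otherwise a
 * pending fence cached in fm_cache is either combined with a newly
 * requested fence or carried through unchanged.
 */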
static u8 get_fence(u8 fence, struct ib_send_wr *wr)
{
	if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
		     wr->send_flags & IB_SEND_FENCE))
		return MLX5_FENCE_MODE_STRONG_ORDERING;

	if (unlikely(fence)) {
		if (wr->send_flags & IB_SEND_FENCE)
			return MLX5_FENCE_MODE_SMALL_AND_FENCE;
		else
			return fence;
	} else {
		return 0;
	}
}
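
/*
 * Claim the next send queue slot: check for overflow against the send
 * CQ, locate the WQE by cur_post, and initialize the control segment's
 * immediate data and fm_ce_se (fence/completion/solicited) flags from
 * the work request.
 */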
static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
		     struct mlx5_wqe_ctrl_seg **ctrl,
		     struct ib_send_wr *wr, int *idx,
		     int *size, int nreq)
{
	int err = 0;

	if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) {
		err = -ENOMEM;
		return err;
	}

	*idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
	*seg = mlx5_get_send_wqe(qp, *idx);
	*ctrl = *seg;
	*(uint32_t *)(*seg + 8) = 0;
	(*ctrl)->imm = send_ieth(wr);
	(*ctrl)->fm_ce_se = qp->sq_signal_bits |
		(wr->send_flags & IB_SEND_SIGNALED ?
		 MLX5_WQE_CTRL_CQ_UPDATE : 0) |
		(wr->send_flags & IB_SEND_SOLICITED ?
		 MLX5_WQE_CTRL_SOLICITED : 0);

	*seg += sizeof(**ctrl);
	*size = sizeof(**ctrl) / 16;

	return err;
}
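
/*
 * Stamp the control segment and bookkeeping for a completed WQE.
 * "size" is in 16-byte units and feeds the DS field of qpn_ds, while
 * cur_post advances in 64-byte basic blocks (MLX5_SEND_WQE_BB), so one
 * logical WQE may consume several blocks.
 */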
static void finish_wqe(struct mlx5_ib_qp *qp,
		       struct mlx5_wqe_ctrl_seg *ctrl,
		       u8 size, unsigned idx, u64 wr_id,
		       int nreq, u8 fence, u8 next_fence,
		       u32 mlx5_opcode)
{
	u8 opmod = 0;

	ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
					     mlx5_opcode | ((u32)opmod << 24));
	ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8));
	ctrl->fm_ce_se |= fence;
	qp->fm_cache = next_fence;
	if (unlikely(qp->wq_sig))
		ctrl->signature = wq_sig(ctrl);

	qp->sq.wrid[idx] = wr_id;
	qp->sq.w_list[idx].opcode = mlx5_opcode;
	qp->sq.wqe_head[idx] = qp->sq.head + nreq;
	qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
	qp->sq.w_list[idx].next = qp->sq.cur_post;
}
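
/*
 * Post a chain of send work requests.  For each WR this claims a send
 * queue slot (begin_wqe), appends the transport- and opcode-specific
 * segments, then stamps the control segment (finish_wqe).  Once the
 * chain is built, a memory barrier orders the WQE writes before the
 * doorbell record update and the MMIO doorbell ring.
 *
 * A minimal caller sketch (hypothetical names; error handling and DMA
 * mapping omitted) posting one signaled RDMA write through the verbs
 * layer, which lands here via ib_post_send():
 *
 *	struct ib_sge sge = {
 *		.addr	= (u64)my_dma_addr,
 *		.length	= my_len,
 *		.lkey	= my_mr->lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.opcode		= IB_WR_RDMA_WRITE,
 *		.send_flags	= IB_SEND_SIGNALED,
 *		.sg_list	= &sge,
 *		.num_sge	= 1,
 *	}, *bad_wr;
 *
 *	wr.wr.rdma.remote_addr = remote_addr;
 *	wr.wr.rdma.rkey = remote_rkey;
 *	err = ib_post_send(qp, &wr, &bad_wr);
 */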
int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr)
{
	struct mlx5_wqe_ctrl_seg *ctrl = NULL;	/* compiler warning */
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_ib_mr *mr;
	struct mlx5_wqe_data_seg *dpseg;
	struct mlx5_wqe_xrc_seg *xrc;
	struct mlx5_bf *bf = qp->bf;
	int uninitialized_var(size);
	void *qend = qp->sq.qend;
	unsigned long flags;
	unsigned idx;
	int err = 0;
	int inl = 0;
	int num_sge;
	void *seg;
	int nreq;
	int i;
	u8 next_fence = 0;
	u8 fence;

	spin_lock_irqsave(&qp->sq.lock, flags);

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
			mlx5_ib_warn(dev, "\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		fence = qp->fm_cache;
		num_sge = wr->num_sge;
		if (unlikely(num_sge > qp->sq.max_gs)) {
			mlx5_ib_warn(dev, "\n");
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq);
		if (err) {
			mlx5_ib_warn(dev, "\n");
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		switch (ibqp->qp_type) {
		case IB_QPT_XRC_INI:
			xrc = seg;
			xrc->xrc_srqn = htonl(wr->xrc_remote_srq_num);
			seg += sizeof(*xrc);
			size += sizeof(*xrc) / 16;
			/* fall through */
		case IB_QPT_RC:
			switch (wr->opcode) {
			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(seg, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				seg += sizeof(struct mlx5_wqe_raddr_seg);
				size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
				break;

			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
			case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
				mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
				err = -ENOSYS;
				*bad_wr = wr;
				goto out;

			case IB_WR_LOCAL_INV:
				next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
				qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
				ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
				err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					*bad_wr = wr;
					goto out;
				}
				num_sge = 0;
				break;

			case IB_WR_FAST_REG_MR:
				next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
				qp->sq.wr_data[idx] = IB_WR_FAST_REG_MR;
				ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey);
				err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					*bad_wr = wr;
					goto out;
				}
				num_sge = 0;
				break;

			case IB_WR_REG_SIG_MR:
				qp->sq.wr_data[idx] = IB_WR_REG_SIG_MR;
				mr = to_mmr(wr->wr.sig_handover.sig_mr);

				ctrl->imm = cpu_to_be32(mr->ibmr.rkey);
				err = set_sig_umr_wr(wr, qp, &seg, &size);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					*bad_wr = wr;
					goto out;
				}

				finish_wqe(qp, ctrl, size, idx, wr->wr_id,
					   nreq, get_fence(fence, wr),
					   next_fence, MLX5_OPCODE_UMR);
				/*
				 * SET_PSV WQEs are not signaled and solicited
				 * on error
				 */
				wr->send_flags &= ~IB_SEND_SIGNALED;
				wr->send_flags |= IB_SEND_SOLICITED;
				err = begin_wqe(qp, &seg, &ctrl, wr,
						&idx, &size, nreq);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					err = -ENOMEM;
					*bad_wr = wr;
					goto out;
				}

				err = set_psv_wr(&wr->wr.sig_handover.sig_attrs->mem,
						 mr->sig->psv_memory.psv_idx, &seg,
						 &size);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					*bad_wr = wr;
					goto out;
				}

				finish_wqe(qp, ctrl, size, idx, wr->wr_id,
					   nreq, get_fence(fence, wr),
					   next_fence, MLX5_OPCODE_SET_PSV);
				err = begin_wqe(qp, &seg, &ctrl, wr,
						&idx, &size, nreq);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					err = -ENOMEM;
					*bad_wr = wr;
					goto out;
				}

				next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
				err = set_psv_wr(&wr->wr.sig_handover.sig_attrs->wire,
						 mr->sig->psv_wire.psv_idx, &seg,
						 &size);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					*bad_wr = wr;
					goto out;
				}

				finish_wqe(qp, ctrl, size, idx, wr->wr_id,
					   nreq, get_fence(fence, wr),
					   next_fence, MLX5_OPCODE_SET_PSV);
				num_sge = 0;
				goto skip_psv;

			default:
				break;
			}
			break;

		case IB_QPT_UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(seg, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				seg += sizeof(struct mlx5_wqe_raddr_seg);
				size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
				break;

			default:
				break;
			}
			break;

		case IB_QPT_SMI:
		case IB_QPT_GSI:
		case IB_QPT_UD:
			set_datagram_seg(seg, wr);
			seg += sizeof(struct mlx5_wqe_datagram_seg);
			size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
			if (unlikely((seg == qend)))
				seg = mlx5_get_send_wqe(qp, 0);
			break;

		case MLX5_IB_QPT_REG_UMR:
			if (wr->opcode != MLX5_IB_WR_UMR) {
				err = -EINVAL;
				mlx5_ib_warn(dev, "bad opcode\n");
				goto out;
			}
			qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
			ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey);
			set_reg_umr_segment(seg, wr);
			seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
			size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
			if (unlikely((seg == qend)))
				seg = mlx5_get_send_wqe(qp, 0);
			set_reg_mkey_segment(seg, wr);
			seg += sizeof(struct mlx5_mkey_seg);
			size += sizeof(struct mlx5_mkey_seg) / 16;
			if (unlikely((seg == qend)))
				seg = mlx5_get_send_wqe(qp, 0);
			break;

		default:
			break;
		}

		if (wr->send_flags & IB_SEND_INLINE && num_sge) {
			int uninitialized_var(sz);

			err = set_data_inl_seg(qp, wr, seg, &sz);
			if (unlikely(err)) {
				mlx5_ib_warn(dev, "\n");
				*bad_wr = wr;
				goto out;
			}
			inl = 1;
			size += sz;
		} else {
			dpseg = seg;
			for (i = 0; i < num_sge; i++) {
				if (unlikely(dpseg == qend)) {
					seg = mlx5_get_send_wqe(qp, 0);
					dpseg = seg;
				}
				if (likely(wr->sg_list[i].length)) {
					set_data_ptr_seg(dpseg, wr->sg_list + i);
					size += sizeof(struct mlx5_wqe_data_seg) / 16;
					dpseg++;
				}
			}
		}

		finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
			   get_fence(fence, wr), next_fence,
			   mlx5_ib_opcode[wr->opcode]);
skip_psv:
		if (0)
			dump_wqe(qp, idx, size);
	}

out:
	if (likely(nreq)) {
		qp->sq.head += nreq;

		/* Make sure that descriptors are written before
		 * updating doorbell record and ringing the doorbell
		 */
		wmb();

		qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);

		/* Make sure doorbell record is visible to the HCA before
		 * we hit doorbell */
		wmb();

		if (bf->need_lock)
			spin_lock(&bf->lock);
		else
			__acquire(&bf->lock);

		/* TBD enable WC */
		if (0 && nreq == 1 && bf->uuarn && inl && size > 1 && size <= bf->buf_size / 16) {
			mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl, ALIGN(size * 16, 64), qp);
			/* wc_wmb(); */
		} else {
			mlx5_write64((__be32 *)ctrl, bf->regreg + bf->offset,
				     MLX5_GET_DOORBELL_LOCK(&bf->lock32));
			/* Make sure doorbells don't leak out of SQ spinlock
			 * and reach the HCA out of order.
			 */
			mmiowb();
		}
		bf->offset ^= bf->buf_size;
		if (bf->need_lock)
			spin_unlock(&bf->lock);
		else
			__release(&bf->lock);
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return err;
}
static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)
{
	sig->signature = calc_sig(sig, size);
}
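
/*
 * Post a chain of receive work requests.  Scatter entries are copied
 * into the receive WQE; if fewer than rq.max_gs entries are used, the
 * list is terminated with a zero-length entry carrying
 * MLX5_INVALID_LKEY so the HCA stops scattering there.  When wq_sig is
 * set, a signature segment is reserved at the head of the WQE.
 */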
int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_wqe_data_seg *scat;
	struct mlx5_rwqe_sig *sig;
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int i;

	spin_lock_irqsave(&qp->rq.lock, flags);

	ind = qp->rq.head & (qp->rq.wqe_cnt - 1);

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		scat = get_recv_wqe(qp, ind);
		if (qp->wq_sig)
			scat++;

		for (i = 0; i < wr->num_sge; i++)
			set_data_ptr_seg(scat + i, wr->sg_list + i);

		if (i < qp->rq.max_gs) {
			scat[i].byte_count = 0;
			scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
			scat[i].addr = 0;
		}

		if (qp->wq_sig) {
			sig = (struct mlx5_rwqe_sig *)scat;
			set_sig_seg(sig, (qp->rq.max_gs + 1) << 2);
		}

		qp->rq.wrid[ind] = wr->wr_id;

		ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
	}

out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/* Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);

	return err;
}
static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
{
	switch (mlx5_state) {
	case MLX5_QP_STATE_RST:      return IB_QPS_RESET;
	case MLX5_QP_STATE_INIT:     return IB_QPS_INIT;
	case MLX5_QP_STATE_RTR:      return IB_QPS_RTR;
	case MLX5_QP_STATE_RTS:      return IB_QPS_RTS;
	case MLX5_QP_STATE_SQ_DRAINING:
	case MLX5_QP_STATE_SQD:      return IB_QPS_SQD;
	case MLX5_QP_STATE_SQER:     return IB_QPS_SQE;
	case MLX5_QP_STATE_ERR:      return IB_QPS_ERR;
	default:		     return -1;
	}
}
static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state)
{
	switch (mlx5_mig_state) {
	case MLX5_QP_PM_ARMED:		return IB_MIG_ARMED;
	case MLX5_QP_PM_REARM:		return IB_MIG_REARM;
	case MLX5_QP_PM_MIGRATED:	return IB_MIG_MIGRATED;
	default:			return -1;
	}
}
static int to_ib_qp_access_flags(int mlx5_flags)
{
	int ib_flags = 0;

	if (mlx5_flags & MLX5_QP_BIT_RRE)
		ib_flags |= IB_ACCESS_REMOTE_READ;
	if (mlx5_flags & MLX5_QP_BIT_RWE)
		ib_flags |= IB_ACCESS_REMOTE_WRITE;
	if (mlx5_flags & MLX5_QP_BIT_RAE)
		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

	return ib_flags;
}
static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
			  struct mlx5_qp_path *path)
{
	struct mlx5_core_dev *dev = ibdev->mdev;

	memset(ib_ah_attr, 0, sizeof(*ib_ah_attr));
	ib_ah_attr->port_num	  = path->port;

	if (ib_ah_attr->port_num == 0 ||
	    ib_ah_attr->port_num > dev->caps.gen.num_ports)
		return;

	ib_ah_attr->sl = path->sl & 0xf;

	ib_ah_attr->dlid	  = be16_to_cpu(path->rlid);
	ib_ah_attr->src_path_bits = path->grh_mlid & 0x7f;
	ib_ah_attr->static_rate   = path->static_rate ? path->static_rate - 5 : 0;
	ib_ah_attr->ah_flags      = (path->grh_mlid & (1 << 7)) ? IB_AH_GRH : 0;
	if (ib_ah_attr->ah_flags) {
		ib_ah_attr->grh.sgid_index = path->mgid_index;
		ib_ah_attr->grh.hop_limit  = path->hop_limit;
		ib_ah_attr->grh.traffic_class =
			(be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
		ib_ah_attr->grh.flow_label =
			be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
		memcpy(ib_ah_attr->grh.dgid.raw,
		       path->rgid, sizeof(ib_ah_attr->grh.dgid.raw));
	}
}
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_query_qp_mbox_out *outb;
	struct mlx5_qp_context *context;
	int mlx5_state;
	int err = 0;

	mutex_lock(&qp->mutex);
	outb = kzalloc(sizeof(*outb), GFP_KERNEL);
	if (!outb) {
		err = -ENOMEM;
		goto out;
	}
	context = &outb->ctx;
	err = mlx5_core_qp_query(dev->mdev, &qp->mqp, outb, sizeof(*outb));
	if (err)
		goto out_free;

	mlx5_state = be32_to_cpu(context->flags) >> 28;

	qp->state		     = to_ib_qp_state(mlx5_state);
	qp_attr->qp_state	     = qp->state;
	qp_attr->path_mtu	     = context->mtu_msgmax >> 5;
	qp_attr->path_mig_state	     =
		to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
	qp_attr->qkey		     = be32_to_cpu(context->qkey);
	qp_attr->rq_psn		     = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
	qp_attr->sq_psn		     = be32_to_cpu(context->next_send_psn) & 0xffffff;
	qp_attr->dest_qp_num	     = be32_to_cpu(context->log_pg_sz_remote_qpn) & 0xffffff;
	qp_attr->qp_access_flags     =
		to_ib_qp_access_flags(be32_to_cpu(context->params2));

	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
		to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
		qp_attr->alt_pkey_index = context->alt_path.pkey_index & 0x7f;
		qp_attr->alt_port_num	= qp_attr->alt_ah_attr.port_num;
	}

	qp_attr->pkey_index = context->pri_path.pkey_index & 0x7f;
	qp_attr->port_num = context->pri_path.port;

	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
	qp_attr->sq_draining = mlx5_state == MLX5_QP_STATE_SQ_DRAINING;

	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);

	qp_attr->max_dest_rd_atomic =
		1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
	qp_attr->min_rnr_timer	    =
		(be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
	qp_attr->timeout	    = context->pri_path.ackto_lt >> 3;
	qp_attr->retry_cnt	    = (be32_to_cpu(context->params1) >> 16) & 0x7;
	qp_attr->rnr_retry	    = (be32_to_cpu(context->params1) >> 13) & 0x7;
	qp_attr->alt_timeout	    = context->alt_path.ackto_lt >> 3;
	qp_attr->cur_qp_state	    = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr    = qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge   = qp->rq.max_gs;

	if (!ibqp->uobject) {
		qp_attr->cap.max_send_wr  = qp->sq.wqe_cnt;
		qp_attr->cap.max_send_sge = qp->sq.max_gs;
	} else {
		qp_attr->cap.max_send_wr  = 0;
		qp_attr->cap.max_send_sge = 0;
	}

	/* We don't support inline sends for kernel QPs (yet), and we
	 * don't know what userspace's value should be.
	 */
	qp_attr->cap.max_inline_data = 0;

	qp_init_attr->cap	     = qp_attr->cap;

	qp_init_attr->create_flags = 0;
	if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
		qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;

	qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
		IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;

out_free:
	kfree(outb);

out:
	mutex_unlock(&qp->mutex);
	return err;
}
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_general_caps *gen;
	struct mlx5_ib_xrcd *xrcd;
	int err;

	gen = &dev->mdev->caps.gen;
	if (!(gen->flags & MLX5_DEV_CAP_FLAG_XRC))
		return ERR_PTR(-ENOSYS);

	xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
	if (!xrcd)
		return ERR_PTR(-ENOMEM);

	err = mlx5_core_xrcd_alloc(dev->mdev, &xrcd->xrcdn);
	if (err) {
		kfree(xrcd);
		return ERR_PTR(-ENOMEM);
	}

	return &xrcd->ibxrcd;
}
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
	u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
	int err;

	err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn);
	if (err) {
		mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
		return err;
	}

	kfree(xrcd);

	return 0;
}