/*
 * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <rdma/ib_umem.h>

/* not supported currently */
static int wq_signature;

enum {
	MLX5_IB_ACK_REQ_FREQ	= 8,
};

enum {
	MLX5_IB_DEFAULT_SCHED_QUEUE	= 0x83,
	MLX5_IB_DEFAULT_QP0_SCHED_QUEUE	= 0x3f,
	MLX5_IB_LINK_TYPE_IB		= 0,
	MLX5_IB_LINK_TYPE_ETH		= 1
};

enum {
	MLX5_IB_SQ_STRIDE	= 6,
	MLX5_IB_CACHE_LINE_SIZE	= 64,
};
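
/*
 * Map IB verbs work request opcodes to the opcode written into the
 * mlx5 send WQE control segment.  Local invalidate, fast-register MR
 * and the driver-internal UMR work request all use the UMR opcode.
 */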
static const u32 mlx5_ib_opcode[] = {
	[IB_WR_SEND]				= MLX5_OPCODE_SEND,
	[IB_WR_SEND_WITH_IMM]			= MLX5_OPCODE_SEND_IMM,
	[IB_WR_RDMA_WRITE]			= MLX5_OPCODE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM]		= MLX5_OPCODE_RDMA_WRITE_IMM,
	[IB_WR_RDMA_READ]			= MLX5_OPCODE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP]		= MLX5_OPCODE_ATOMIC_CS,
	[IB_WR_ATOMIC_FETCH_AND_ADD]		= MLX5_OPCODE_ATOMIC_FA,
	[IB_WR_SEND_WITH_INV]			= MLX5_OPCODE_SEND_INVAL,
	[IB_WR_LOCAL_INV]			= MLX5_OPCODE_UMR,
	[IB_WR_FAST_REG_MR]			= MLX5_OPCODE_UMR,
	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP]	= MLX5_OPCODE_ATOMIC_MASKED_CS,
	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]	= MLX5_OPCODE_ATOMIC_MASKED_FA,
	[MLX5_IB_WR_UMR]			= MLX5_OPCODE_UMR,
};
unsigned int page_shift;

static int is_qp0(enum ib_qp_type qp_type)
{
	return qp_type == IB_QPT_SMI;
}

static int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == IB_QPT_GSI;
}

static int is_sqp(enum ib_qp_type qp_type)
{
	return is_qp0(qp_type) || is_qp1(qp_type);
}
static void *get_wqe(struct mlx5_ib_qp *qp, int offset)
{
	return mlx5_buf_offset(&qp->buf, offset);
}

static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
}

void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));
}
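
/*
 * Async event handler attached to every mlx5 core QP: translate the
 * firmware event type into the matching ib_event and deliver it to
 * the consumer's event handler; a successful path migration also
 * switches the QP to its alternate port.
 */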
static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
{
	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
	struct ib_event event;

	if (type == MLX5_EVENT_TYPE_PATH_MIG)
		to_mibqp(qp)->port = to_mibqp(qp)->alt_port;

	if (ibqp->event_handler) {
		event.device = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case MLX5_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case MLX5_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case MLX5_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn);
			return;
		}

		ibqp->event_handler(&event, ibqp->qp_context);
	}
}
static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
		       int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
{
	int wqe_size;
	int wq_size;

	/* Sanity check RQ size before proceeding */
	if (cap->max_recv_wr > dev->mdev.caps.max_wqes)
		return -EINVAL;

	if (!has_rq) {
		qp->rq.wqe_shift = 0;
	} else if (ucmd) {
		qp->rq.wqe_cnt = ucmd->rq_wqe_count;
		qp->rq.wqe_shift = ucmd->rq_wqe_shift;
		qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
		qp->rq.max_post = qp->rq.wqe_cnt;
	} else {
		wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0;
		wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
		wqe_size = roundup_pow_of_two(wqe_size);
		wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
		wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
		qp->rq.wqe_cnt = wq_size / wqe_size;
		if (wqe_size > dev->mdev.caps.max_rq_desc_sz) {
			mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
				    wqe_size,
				    dev->mdev.caps.max_rq_desc_sz);
			return -EINVAL;
		}
		qp->rq.wqe_shift = ilog2(wqe_size);
		qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
		qp->rq.max_post = qp->rq.wqe_cnt;
	}

	return 0;
}
static int sq_overhead(enum ib_qp_type qp_type)
{
	int size = 0;

	switch (qp_type) {
	case IB_QPT_XRC_INI:
		size = sizeof(struct mlx5_wqe_xrc_seg);
		/* fall through */
	case IB_QPT_RC:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_atomic_seg) +
			sizeof(struct mlx5_wqe_raddr_seg);
		break;

	case IB_QPT_UC:
		size = sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_raddr_seg);
		break;

	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		size = sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_datagram_seg);
		break;

	case MLX5_IB_QPT_REG_UMR:
		size = sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_umr_ctrl_seg) +
			sizeof(struct mlx5_mkey_seg);
		break;

	default:
		return -EINVAL;
	}

	return size;
}
static int calc_send_wqe(struct ib_qp_init_attr *attr)
{
	int inl_size = 0;
	int size;

	size = sq_overhead(attr->qp_type);

	if (attr->cap.max_inline_data) {
		inl_size = size + sizeof(struct mlx5_wqe_inline_seg) +
			attr->cap.max_inline_data;
	}

	size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);

	return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
}
static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
			struct mlx5_ib_qp *qp)
{
	int wqe_size;
	int wq_size;

	if (!attr->cap.max_send_wr)
		return 0;

	wqe_size = calc_send_wqe(attr);
	mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size);

	if (wqe_size > dev->mdev.caps.max_sq_desc_sz) {
		mlx5_ib_dbg(dev, "\n");
		return -EINVAL;
	}

	qp->max_inline_data = wqe_size - sq_overhead(attr->qp_type) -
		sizeof(struct mlx5_wqe_inline_seg);
	attr->cap.max_inline_data = qp->max_inline_data;

	wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
	qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
	qp->sq.max_gs = attr->cap.max_send_sge;
	qp->sq.max_post = 1 << ilog2(wq_size / wqe_size);

	return wq_size;
}
static int set_user_buf_size(struct mlx5_ib_dev *dev,
			     struct mlx5_ib_qp *qp,
			     struct mlx5_ib_create_qp *ucmd)
{
	int desc_sz = 1 << qp->sq.wqe_shift;

	if (desc_sz > dev->mdev.caps.max_sq_desc_sz) {
		mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
			     desc_sz, dev->mdev.caps.max_sq_desc_sz);
		return -EINVAL;
	}

	if (ucmd->sq_wqe_count && ((1 << ilog2(ucmd->sq_wqe_count)) != ucmd->sq_wqe_count)) {
		mlx5_ib_warn(dev, "sq_wqe_count %d, sq_wqe_count %d\n",
			     ucmd->sq_wqe_count, ucmd->sq_wqe_count);
		return -EINVAL;
	}

	qp->sq.wqe_cnt = ucmd->sq_wqe_count;

	if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) {
		mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
			     qp->sq.wqe_cnt, dev->mdev.caps.max_wqes);
		return -EINVAL;
	}

	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
		       (qp->sq.wqe_cnt << 6);

	return 0;
}
static int qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI ||
	    attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
	    attr->qp_type == MLX5_IB_QPT_REG_UMR ||
	    !attr->cap.max_recv_wr)
		return 0;

	return 1;
}
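
/*
 * UUAR (micro UAR) allocation helpers.  Each UAR page provides
 * MLX5_BF_REGS_PER_PAGE blue-flame registers; the high (low-latency)
 * class scans the dedicated region at the end of the range for a free
 * register, while the medium class shares registers and simply picks
 * the least used index.
 */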
static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
{
	int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
	int start_uuar;
	int i;

	start_uuar = nuuars - uuari->num_low_latency_uuars;
	for (i = start_uuar; i < nuuars; i++) {
		if (!test_bit(i, uuari->bitmap)) {
			set_bit(i, uuari->bitmap);
			return i;
		}
	}

	return -ENOMEM;
}
static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari)
{
	int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
	int minidx = 1;
	int uuarn;
	int end;
	int i;

	end = nuuars - uuari->num_low_latency_uuars;

	for (i = 1; i < end; i++) {
		uuarn = i & 3;
		if (uuarn == 2 || uuarn == 3)
			continue;

		if (uuari->count[i] < uuari->count[minidx])
			minidx = i;
	}

	uuari->count[minidx]++;
	return minidx;
}
static int alloc_uuar(struct mlx5_uuar_info *uuari,
		      enum mlx5_ib_latency_class lat)
{
	int uuarn = -EINVAL;

	mutex_lock(&uuari->lock);
	switch (lat) {
	case MLX5_IB_LATENCY_CLASS_LOW:
		uuarn = 0;
		uuari->count[uuarn]++;
		break;

	case MLX5_IB_LATENCY_CLASS_MEDIUM:
		uuarn = alloc_med_class_uuar(uuari);
		break;

	case MLX5_IB_LATENCY_CLASS_HIGH:
		uuarn = alloc_high_class_uuar(uuari);
		break;

	case MLX5_IB_LATENCY_CLASS_FAST_PATH:
		uuarn = 2;
		uuari->count[uuarn]++;
		break;
	}
	mutex_unlock(&uuari->lock);

	return uuarn;
}
static void free_med_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
	clear_bit(uuarn, uuari->bitmap);
	--uuari->count[uuarn];
}

static void free_high_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
	clear_bit(uuarn, uuari->bitmap);
	--uuari->count[uuarn];
}

static void free_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
	int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
	int high_uuar = nuuars - uuari->num_low_latency_uuars;

	mutex_lock(&uuari->lock);
	if (!uuarn) {
		--uuari->count[uuarn];
		goto out;
	}

	if (uuarn < high_uuar) {
		free_med_class_uuar(uuari, uuarn);
		goto out;
	}

	free_high_class_uuar(uuari, uuarn);

out:
	mutex_unlock(&uuari->lock);
}
static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:	return MLX5_QP_STATE_RST;
	case IB_QPS_INIT:	return MLX5_QP_STATE_INIT;
	case IB_QPS_RTR:	return MLX5_QP_STATE_RTR;
	case IB_QPS_RTS:	return MLX5_QP_STATE_RTS;
	case IB_QPS_SQD:	return MLX5_QP_STATE_SQD;
	case IB_QPS_SQE:	return MLX5_QP_STATE_SQER;
	case IB_QPS_ERR:	return MLX5_QP_STATE_ERR;
	default:		return -1;
	}
}
static int to_mlx5_st(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_RC:			return MLX5_QP_ST_RC;
	case IB_QPT_UC:			return MLX5_QP_ST_UC;
	case IB_QPT_UD:			return MLX5_QP_ST_UD;
	case MLX5_IB_QPT_REG_UMR:	return MLX5_QP_ST_REG_UMR;
	case IB_QPT_XRC_TGT:		return MLX5_QP_ST_XRC;
	case IB_QPT_SMI:		return MLX5_QP_ST_QP0;
	case IB_QPT_GSI:		return MLX5_QP_ST_QP1;
	case IB_QPT_RAW_IPV6:		return MLX5_QP_ST_RAW_IPV6;
	case IB_QPT_RAW_ETHERTYPE:	return MLX5_QP_ST_RAW_ETHERTYPE;
	case IB_QPT_RAW_PACKET:
	default:			return -EINVAL;
	}
}
static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn)
{
	return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index;
}
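
/*
 * create_user_qp() builds a userspace-owned QP: it copies the create
 * request from udata, grabs a UUAR from the caller's context, pins the
 * user buffer with ib_umem_get() and fills the CREATE_QP mailbox
 * (page list, UAR index, doorbell record) for the firmware command.
 */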
static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			  struct mlx5_ib_qp *qp, struct ib_udata *udata,
			  struct mlx5_create_qp_mbox_in **in,
			  struct mlx5_ib_create_qp_resp *resp, int *inlen)
{
	struct mlx5_ib_ucontext *context;
	struct mlx5_ib_create_qp ucmd;
	int page_shift;
	int uar_index;
	int npages;
	int ncont;
	int offset;
	int uuarn;
	int err;

	err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
	if (err) {
		mlx5_ib_dbg(dev, "copy failed\n");
		return err;
	}

	context = to_mucontext(pd->uobject->context);
	/*
	 * TBD: should come from the verbs when we have the API
	 */
	uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH);
	if (uuarn < 0) {
		mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n");
		mlx5_ib_dbg(dev, "reverting to high latency\n");
		uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW);
		if (uuarn < 0) {
			mlx5_ib_dbg(dev, "uuar allocation failed\n");
			return uuarn;
		}
	}

	uar_index = uuarn_to_uar_index(&context->uuari, uuarn);
	mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index);

	err = set_user_buf_size(dev, qp, &ucmd);
	if (err)
		goto err_uuar;

	qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
			       qp->buf_size, 0, 0);
	if (IS_ERR(qp->umem)) {
		mlx5_ib_dbg(dev, "umem_get failed\n");
		err = PTR_ERR(qp->umem);
		goto err_uuar;
	}

	mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift,
			   &ncont, NULL);
	err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset);
	if (err) {
		mlx5_ib_warn(dev, "bad offset\n");
		goto err_umem;
	}
	mlx5_ib_dbg(dev, "addr 0x%llx, size %d, npages %d, page_shift %d, ncont %d, offset %d\n",
		    ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset);

	*inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
	*in = mlx5_vzalloc(*inlen);
	if (!*in) {
		err = -ENOMEM;
		goto err_umem;
	}
	mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0);
	(*in)->ctx.log_pg_sz_remote_qpn =
		cpu_to_be32((page_shift - PAGE_SHIFT) << 24);
	(*in)->ctx.params2 = cpu_to_be32(offset << 6);

	(*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
	resp->uuar_index = uuarn;

	err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db);
	if (err) {
		mlx5_ib_dbg(dev, "map failed\n");
		goto err_free;
	}

	err = ib_copy_to_udata(udata, resp, sizeof(*resp));
	if (err) {
		mlx5_ib_dbg(dev, "copy failed\n");
		goto err_unmap;
	}
	qp->create_type = MLX5_QP_USER;

	return 0;

err_unmap:
	mlx5_ib_db_unmap_user(context, &qp->db);

err_free:
	mlx5_vfree(*in);

err_umem:
	ib_umem_release(qp->umem);

err_uuar:
	free_uuar(&context->uuari, uuarn);
	return err;
}
static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_ucontext *context;

	context = to_mucontext(pd->uobject->context);
	mlx5_ib_db_unmap_user(context, &qp->db);
	ib_umem_release(qp->umem);
	free_uuar(&context->uuari, qp->uuarn);
}
static int create_kernel_qp(struct mlx5_ib_dev *dev,
			    struct ib_qp_init_attr *init_attr,
			    struct mlx5_ib_qp *qp,
			    struct mlx5_create_qp_mbox_in **in, int *inlen)
{
	enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW;
	struct mlx5_uuar_info *uuari;
	int uar_index;
	int uuarn;
	int err;

	uuari = &dev->mdev.priv.uuari;
	if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
		qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;

	if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
		lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;

	uuarn = alloc_uuar(uuari, lc);
	if (uuarn < 0) {
		mlx5_ib_dbg(dev, "\n");
		return -ENOMEM;
	}

	qp->bf = &uuari->bfs[uuarn];
	uar_index = qp->bf->uar->index;

	err = calc_sq_size(dev, init_attr, qp);
	if (err < 0) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto err_uuar;
	}

	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
	qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);

	err = mlx5_buf_alloc(&dev->mdev, qp->buf_size, PAGE_SIZE * 2, &qp->buf);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto err_uuar;
	}

	qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
	*inlen = sizeof(**in) + sizeof(*(*in)->pas) * qp->buf.npages;
	*in = mlx5_vzalloc(*inlen);
	if (!*in) {
		err = -ENOMEM;
		goto err_buf;
	}
	(*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
	(*in)->ctx.log_pg_sz_remote_qpn = cpu_to_be32((qp->buf.page_shift - PAGE_SHIFT) << 24);
	/* Set "fast registration enabled" for all kernel QPs */
	(*in)->ctx.params1 |= cpu_to_be32(1 << 11);
	(*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4);

	mlx5_fill_page_array(&qp->buf, (*in)->pas);

	err = mlx5_db_alloc(&dev->mdev, &qp->db);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto err_free;
	}

	qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid), GFP_KERNEL);
	qp->sq.wr_data = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data), GFP_KERNEL);
	qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(*qp->rq.wrid), GFP_KERNEL);
	qp->sq.w_list = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.w_list), GFP_KERNEL);
	qp->sq.wqe_head = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wqe_head), GFP_KERNEL);

	if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
	    !qp->sq.w_list || !qp->sq.wqe_head) {
		err = -ENOMEM;
		goto err_wrid;
	}
	qp->create_type = MLX5_QP_KERNEL;

	return 0;

err_wrid:
	mlx5_db_free(&dev->mdev, &qp->db);
	kfree(qp->sq.wqe_head);
	kfree(qp->sq.w_list);
	kfree(qp->sq.wr_data);

err_free:
	mlx5_vfree(*in);

err_buf:
	mlx5_buf_free(&dev->mdev, &qp->buf);

err_uuar:
	free_uuar(&dev->mdev.priv.uuari, uuarn);
	return err;
}
static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
	mlx5_db_free(&dev->mdev, &qp->db);
	kfree(qp->sq.wqe_head);
	kfree(qp->sq.w_list);
	kfree(qp->sq.wr_data);
	mlx5_buf_free(&dev->mdev, &qp->buf);
	free_uuar(&dev->mdev.priv.uuari, qp->bf->uuarn);
}
static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
{
	if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
	    (attr->qp_type == IB_QPT_XRC_INI))
		return cpu_to_be32(MLX5_SRQ_RQ);
	else if (!qp->has_rq)
		return cpu_to_be32(MLX5_ZERO_LEN_RQ);
	else
		return cpu_to_be32(MLX5_NON_ZERO_RQ);
}

static int is_connected(enum ib_qp_type qp_type)
{
	if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC)
		return 1;

	return 0;
}
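
/*
 * Common QP creation path shared by all transport types: size the
 * work queues, delegate buffer setup to the user or kernel variant,
 * then fill the QP context (PD, CQNs, SRQ/XRCD, scatter-to-CQE and
 * signature settings) and issue the CREATE_QP command.
 */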
static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata, struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_resources *devr = &dev->devr;
	struct mlx5_ib_create_qp_resp resp;
	struct mlx5_create_qp_mbox_in *in;
	struct mlx5_ib_create_qp ucmd;
	int inlen = sizeof(*in);
	int err;

	mutex_init(&qp->mutex);
	spin_lock_init(&qp->sq.lock);
	spin_lock_init(&qp->rq.lock);

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;

	if (pd && pd->uobject) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
			mlx5_ib_dbg(dev, "copy failed\n");
			return -EFAULT;
		}

		qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
		qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
	} else {
		qp->wq_sig = !!wq_signature;
	}

	qp->has_rq = qp_has_rq(init_attr);
	err = set_rq_size(dev, &init_attr->cap, qp->has_rq,
			  qp, (pd && pd->uobject) ? &ucmd : NULL);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		return err;
	}

	if (pd) {
		if (pd->uobject) {
			mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
			if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
			    ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
				mlx5_ib_dbg(dev, "invalid rq params\n");
				return -EINVAL;
			}
			if (ucmd.sq_wqe_count > dev->mdev.caps.max_wqes) {
				mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
					    ucmd.sq_wqe_count, dev->mdev.caps.max_wqes);
				return -EINVAL;
			}
			err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen);
			if (err)
				mlx5_ib_dbg(dev, "err %d\n", err);
		} else {
			err = create_kernel_qp(dev, init_attr, qp, &in, &inlen);
			if (err)
				mlx5_ib_dbg(dev, "err %d\n", err);
			else
				qp->pa_lkey = to_mpd(pd)->pa_lkey;
		}

		if (err)
			return err;
	} else {
		in = mlx5_vzalloc(sizeof(*in));
		if (!in)
			return -ENOMEM;

		qp->create_type = MLX5_QP_EMPTY;
	}

	if (is_sqp(init_attr->qp_type))
		qp->port = init_attr->port_num;

	in->ctx.flags = cpu_to_be32(to_mlx5_st(init_attr->qp_type) << 16 |
				    MLX5_QP_PM_MIGRATED << 11);

	if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
		in->ctx.flags_pd = cpu_to_be32(to_mpd(pd ? pd : devr->p0)->pdn);
	else
		in->ctx.flags_pd = cpu_to_be32(MLX5_QP_LAT_SENSITIVE);

	if (qp->wq_sig)
		in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_ENABLE_SIG);

	if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
		int rcqe_sz;
		int scqe_sz;

		rcqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->recv_cq);
		scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq);

		if (rcqe_sz == 128)
			in->ctx.cs_res = MLX5_RES_SCAT_DATA64_CQE;
		else
			in->ctx.cs_res = MLX5_RES_SCAT_DATA32_CQE;

		if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) {
			if (scqe_sz == 128)
				in->ctx.cs_req = MLX5_REQ_SCAT_DATA64_CQE;
			else
				in->ctx.cs_req = MLX5_REQ_SCAT_DATA32_CQE;
		}
	}

	if (qp->rq.wqe_cnt) {
		in->ctx.rq_size_stride = (qp->rq.wqe_shift - 4);
		in->ctx.rq_size_stride |= ilog2(qp->rq.wqe_cnt) << 3;
	}

	in->ctx.rq_type_srqn = get_rx_type(qp, init_attr);

	if (qp->sq.wqe_cnt)
		in->ctx.sq_crq_size |= cpu_to_be16(ilog2(qp->sq.wqe_cnt) << 11);
	else
		in->ctx.sq_crq_size |= cpu_to_be16(0x8000);

	/* Set default resources */
	switch (init_attr->qp_type) {
	case IB_QPT_XRC_TGT:
		in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
		in->ctx.cqn_send = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
		in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
		in->ctx.xrcd = cpu_to_be32(to_mxrcd(init_attr->xrcd)->xrcdn);
		break;
	case IB_QPT_XRC_INI:
		in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
		in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
		in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
		break;
	default:
		if (init_attr->srq) {
			in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x0)->xrcdn);
			in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn);
		} else {
			in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
			in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
		}
	}

	if (init_attr->send_cq)
		in->ctx.cqn_send = cpu_to_be32(to_mcq(init_attr->send_cq)->mcq.cqn);

	if (init_attr->recv_cq)
		in->ctx.cqn_recv = cpu_to_be32(to_mcq(init_attr->recv_cq)->mcq.cqn);

	in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma);

	err = mlx5_core_create_qp(&dev->mdev, &qp->mqp, in, inlen);
	if (err) {
		mlx5_ib_dbg(dev, "create qp failed\n");
		goto err_create;
	}

	mlx5_vfree(in);
	/* Hardware wants QPN written in big-endian order (after
	 * shifting) for send doorbell. Precompute this value to save
	 * a little bit when posting sends.
	 */
	qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);

	qp->mqp.event = mlx5_ib_qp_event;

	return 0;

err_create:
	if (qp->create_type == MLX5_QP_USER)
		destroy_qp_user(pd, qp);
	else if (qp->create_type == MLX5_QP_KERNEL)
		destroy_qp_kernel(dev, qp);

	mlx5_vfree(in);
	return err;
}
*send_cq
, struct mlx5_ib_cq
*recv_cq
)
899 __acquires(&send_cq
->lock
) __acquires(&recv_cq
->lock
)
903 if (send_cq
->mcq
.cqn
< recv_cq
->mcq
.cqn
) {
904 spin_lock_irq(&send_cq
->lock
);
905 spin_lock_nested(&recv_cq
->lock
,
906 SINGLE_DEPTH_NESTING
);
907 } else if (send_cq
->mcq
.cqn
== recv_cq
->mcq
.cqn
) {
908 spin_lock_irq(&send_cq
->lock
);
909 __acquire(&recv_cq
->lock
);
911 spin_lock_irq(&recv_cq
->lock
);
912 spin_lock_nested(&send_cq
->lock
,
913 SINGLE_DEPTH_NESTING
);
916 spin_lock_irq(&send_cq
->lock
);
918 } else if (recv_cq
) {
919 spin_lock_irq(&recv_cq
->lock
);
923 static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq
*send_cq
, struct mlx5_ib_cq
*recv_cq
)
924 __releases(&send_cq
->lock
) __releases(&recv_cq
->lock
)
928 if (send_cq
->mcq
.cqn
< recv_cq
->mcq
.cqn
) {
929 spin_unlock(&recv_cq
->lock
);
930 spin_unlock_irq(&send_cq
->lock
);
931 } else if (send_cq
->mcq
.cqn
== recv_cq
->mcq
.cqn
) {
932 __release(&recv_cq
->lock
);
933 spin_unlock_irq(&send_cq
->lock
);
935 spin_unlock(&send_cq
->lock
);
936 spin_unlock_irq(&recv_cq
->lock
);
939 spin_unlock_irq(&send_cq
->lock
);
941 } else if (recv_cq
) {
942 spin_unlock_irq(&recv_cq
->lock
);
946 static struct mlx5_ib_pd
*get_pd(struct mlx5_ib_qp
*qp
)
948 return to_mpd(qp
->ibqp
.pd
);
951 static void get_cqs(struct mlx5_ib_qp
*qp
,
952 struct mlx5_ib_cq
**send_cq
, struct mlx5_ib_cq
**recv_cq
)
954 switch (qp
->ibqp
.qp_type
) {
959 case MLX5_IB_QPT_REG_UMR
:
961 *send_cq
= to_mcq(qp
->ibqp
.send_cq
);
970 case IB_QPT_RAW_IPV6
:
971 case IB_QPT_RAW_ETHERTYPE
:
972 *send_cq
= to_mcq(qp
->ibqp
.send_cq
);
973 *recv_cq
= to_mcq(qp
->ibqp
.recv_cq
);
976 case IB_QPT_RAW_PACKET
:
static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_cq *send_cq, *recv_cq;
	struct mlx5_modify_qp_mbox_in *in;
	int err;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return;

	if (qp->state != IB_QPS_RESET)
		if (mlx5_core_qp_modify(&dev->mdev, to_mlx5_state(qp->state),
					MLX5_QP_STATE_RST, in, sizeof(*in), &qp->mqp))
			mlx5_ib_warn(dev, "mlx5_ib: modify QP %06x to RESET failed\n",
				     qp->mqp.qpn);

	get_cqs(qp, &send_cq, &recv_cq);

	if (qp->create_type == MLX5_QP_KERNEL) {
		mlx5_ib_lock_cqs(send_cq, recv_cq);
		__mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
				   qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (send_cq != recv_cq)
			__mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
		mlx5_ib_unlock_cqs(send_cq, recv_cq);
	}

	err = mlx5_core_destroy_qp(&dev->mdev, &qp->mqp);
	if (err)
		mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->mqp.qpn);
	kfree(in);

	if (qp->create_type == MLX5_QP_KERNEL)
		destroy_qp_kernel(dev, qp);
	else if (qp->create_type == MLX5_QP_USER)
		destroy_qp_user(&get_pd(qp)->ibpd, qp);
}
static const char *ib_qp_type_str(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_SMI:
		return "IB_QPT_SMI";
	case IB_QPT_GSI:
		return "IB_QPT_GSI";
	case IB_QPT_RAW_IPV6:
		return "IB_QPT_RAW_IPV6";
	case IB_QPT_RAW_ETHERTYPE:
		return "IB_QPT_RAW_ETHERTYPE";
	case IB_QPT_XRC_INI:
		return "IB_QPT_XRC_INI";
	case IB_QPT_XRC_TGT:
		return "IB_QPT_XRC_TGT";
	case IB_QPT_RAW_PACKET:
		return "IB_QPT_RAW_PACKET";
	case MLX5_IB_QPT_REG_UMR:
		return "MLX5_IB_QPT_REG_UMR";
	default:
		return "Invalid QP type";
	}
}
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev;
	struct mlx5_ib_qp *qp;
	u16 xrcdn = 0;
	int err;

	if (pd) {
		dev = to_mdev(pd->device);
	} else {
		/* being cautious here */
		if (init_attr->qp_type != IB_QPT_XRC_TGT &&
		    init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
			pr_warn("%s: no PD for transport %s\n", __func__,
				ib_qp_type_str(init_attr->qp_type));
			return ERR_PTR(-EINVAL);
		}
		dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
	}

	switch (init_attr->qp_type) {
	case IB_QPT_XRC_TGT:
	case IB_QPT_XRC_INI:
		if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_XRC)) {
			mlx5_ib_dbg(dev, "XRC not supported\n");
			return ERR_PTR(-ENOSYS);
		}
		init_attr->recv_cq = NULL;
		if (init_attr->qp_type == IB_QPT_XRC_TGT) {
			xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
			init_attr->send_cq = NULL;
		}

		/* fall through */
	case MLX5_IB_QPT_REG_UMR:
		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);

		err = create_qp_common(dev, pd, init_attr, udata, qp);
		if (err) {
			mlx5_ib_dbg(dev, "create_qp_common failed\n");
			kfree(qp);
			return ERR_PTR(err);
		}

		if (is_qp0(init_attr->qp_type))
			qp->ibqp.qp_num = 0;
		else if (is_qp1(init_attr->qp_type))
			qp->ibqp.qp_num = 1;
		else
			qp->ibqp.qp_num = qp->mqp.qpn;

		mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
			    qp->ibqp.qp_num, qp->mqp.qpn, to_mcq(init_attr->recv_cq)->mcq.cqn,
			    to_mcq(init_attr->send_cq)->mcq.cqn);

		qp->xrcdn = xrcdn;
		break;

	case IB_QPT_RAW_IPV6:
	case IB_QPT_RAW_ETHERTYPE:
	case IB_QPT_RAW_PACKET:
	default:
		mlx5_ib_dbg(dev, "unsupported qp type %d\n",
			    init_attr->qp_type);
		/* Don't support raw QPs */
		return ERR_PTR(-EINVAL);
	}

	return &qp->ibqp;
}
int mlx5_ib_destroy_qp(struct ib_qp *qp)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_ib_qp *mqp = to_mqp(qp);

	destroy_qp_common(dev, mqp);

	kfree(mqp);

	return 0;
}
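
/*
 * Translate the IB access flags and responder RD-atomic depth carried
 * in a modify-QP request into the RRE/RWE/RAE bits of the hardware QP
 * context; remote read and atomic are only granted when the responder
 * resources are non-zero.
 */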
static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr,
				   int attr_mask)
{
	u32 hw_access_flags = 0;
	u8 dest_rd_atomic;
	u32 access_flags;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MLX5_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= (MLX5_QP_BIT_RAE | MLX5_ATOMIC_MODE_CX);
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MLX5_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}
enum {
	MLX5_PATH_FLAG_FL	= 1 << 0,
	MLX5_PATH_FLAG_FREE_AR	= 1 << 1,
	MLX5_PATH_FLAG_COUNTER	= 1 << 2,
};

static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
{
	if (rate == IB_RATE_PORT_CURRENT) {
		return 0;
	} else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
		return -EINVAL;
	} else {
		while (rate != IB_RATE_2_5_GBPS &&
		       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
			 dev->mdev.caps.stat_rate_support))
			--rate;
	}

	return rate + MLX5_STAT_RATE_OFFSET;
}
static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
			 struct mlx5_qp_path *path, u8 port, int attr_mask,
			 u32 path_flags, const struct ib_qp_attr *attr)
{
	int err;

	path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
	path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : 0;

	if (attr_mask & IB_QP_PKEY_INDEX)
		path->pkey_index = attr->pkey_index;

	path->grh_mlid = ah->src_path_bits & 0x7f;
	path->rlid = cpu_to_be16(ah->dlid);

	if (ah->ah_flags & IB_AH_GRH) {
		path->grh_mlid |= 1 << 7;
		path->mgid_index = ah->grh.sgid_index;
		path->hop_limit = ah->grh.hop_limit;
		path->tclass_flowlabel =
			cpu_to_be32((ah->grh.traffic_class << 20) |
				    (ah->grh.flow_label));
		memcpy(path->rgid, ah->grh.dgid.raw, 16);
	}

	err = ib_rate_to_mlx5(dev, ah->static_rate);
	if (err < 0)
		return err;
	path->static_rate = err;

	if (ah->ah_flags & IB_AH_GRH) {
		if (ah->grh.sgid_index >= dev->mdev.caps.port[port - 1].gid_table_len) {
			pr_err("sgid_index (%u) too large. max is %d\n",
			       ah->grh.sgid_index, dev->mdev.caps.port[port - 1].gid_table_len);
			return -EINVAL;
		}

		path->grh_mlid |= 1 << 7;
		path->mgid_index = ah->grh.sgid_index;
		path->hop_limit = ah->grh.hop_limit;
		path->tclass_flowlabel =
			cpu_to_be32((ah->grh.traffic_class << 20) |
				    (ah->grh.flow_label));
		memcpy(path->rgid, ah->grh.dgid.raw, 16);
	}

	if (attr_mask & IB_QP_TIMEOUT)
		path->ackto_lt = attr->timeout << 3;

	path->sl = ah->sl & 0xf;

	return 0;
}
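
/*
 * For each (current state, next state, transport) combination this
 * table lists the optional parameters the firmware accepts in a
 * MODIFY_QP command; the mask derived from the verbs attr_mask is
 * ANDed with the entry before being written to in->optparam.
 */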
static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = {
	[MLX5_QP_STATE_INIT] = {
		[MLX5_QP_STATE_INIT] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE |
					  MLX5_QP_OPTPAR_RAE |
					  MLX5_QP_OPTPAR_RWE |
					  MLX5_QP_OPTPAR_PKEY_INDEX |
					  MLX5_QP_OPTPAR_PRI_PORT,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE |
					  MLX5_QP_OPTPAR_PKEY_INDEX |
					  MLX5_QP_OPTPAR_PRI_PORT,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX |
					  MLX5_QP_OPTPAR_Q_KEY |
					  MLX5_QP_OPTPAR_PRI_PORT,
		},
		[MLX5_QP_STATE_RTR] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
					  MLX5_QP_OPTPAR_RRE |
					  MLX5_QP_OPTPAR_RAE |
					  MLX5_QP_OPTPAR_RWE |
					  MLX5_QP_OPTPAR_PKEY_INDEX,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
					  MLX5_QP_OPTPAR_RWE |
					  MLX5_QP_OPTPAR_PKEY_INDEX,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX |
					  MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX |
					   MLX5_QP_OPTPAR_Q_KEY,
		},
	},
	[MLX5_QP_STATE_RTR] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
					  MLX5_QP_OPTPAR_RRE |
					  MLX5_QP_OPTPAR_RAE |
					  MLX5_QP_OPTPAR_RWE |
					  MLX5_QP_OPTPAR_PM_STATE |
					  MLX5_QP_OPTPAR_RNR_TIMEOUT,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
					  MLX5_QP_OPTPAR_RWE |
					  MLX5_QP_OPTPAR_PM_STATE,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
		},
	},
	[MLX5_QP_STATE_RTS] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE |
					  MLX5_QP_OPTPAR_RAE |
					  MLX5_QP_OPTPAR_RWE |
					  MLX5_QP_OPTPAR_RNR_TIMEOUT |
					  MLX5_QP_OPTPAR_PM_STATE,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE |
					  MLX5_QP_OPTPAR_PM_STATE,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY |
					  MLX5_QP_OPTPAR_SRQN |
					  MLX5_QP_OPTPAR_CQN_RCV,
		},
	},
	[MLX5_QP_STATE_SQER] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_UD]	 = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
		},
	},
};
static int ib_nr_to_mlx5_nr(int ib_mask)
{
	switch (ib_mask) {
	case IB_QP_CUR_STATE:
	case IB_QP_EN_SQD_ASYNC_NOTIFY:
		return 0;
	case IB_QP_ACCESS_FLAGS:
		return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE |
			MLX5_QP_OPTPAR_RAE;
	case IB_QP_PKEY_INDEX:
		return MLX5_QP_OPTPAR_PKEY_INDEX;
	case IB_QP_PORT:
		return MLX5_QP_OPTPAR_PRI_PORT;
	case IB_QP_QKEY:
		return MLX5_QP_OPTPAR_Q_KEY;
	case IB_QP_AV:
		return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH |
			MLX5_QP_OPTPAR_PRI_PORT;
	case IB_QP_PATH_MTU:
		return 0;
	case IB_QP_TIMEOUT:
		return MLX5_QP_OPTPAR_ACK_TIMEOUT;
	case IB_QP_RETRY_CNT:
		return MLX5_QP_OPTPAR_RETRY_COUNT;
	case IB_QP_RNR_RETRY:
		return MLX5_QP_OPTPAR_RNR_RETRY;
	case IB_QP_MAX_QP_RD_ATOMIC:
		return MLX5_QP_OPTPAR_SRA_MAX;
	case IB_QP_ALT_PATH:
		return MLX5_QP_OPTPAR_ALT_ADDR_PATH;
	case IB_QP_MIN_RNR_TIMER:
		return MLX5_QP_OPTPAR_RNR_TIMEOUT;
	case IB_QP_MAX_DEST_RD_ATOMIC:
		return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE |
			MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE;
	case IB_QP_PATH_MIG_STATE:
		return MLX5_QP_OPTPAR_PM_STATE;
	case IB_QP_DEST_QPN:
		return 0;
	}
	return 0;
}
static int ib_mask_to_mlx5_opt(int ib_mask)
{
	int result = 0;
	int i;

	for (i = 0; i < 8 * sizeof(int); i++) {
		if ((1 << i) & ib_mask)
			result |= ib_nr_to_mlx5_nr(1 << i);
	}

	return result;
}
static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
			       const struct ib_qp_attr *attr, int attr_mask,
			       enum ib_qp_state cur_state, enum ib_qp_state new_state)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_ib_cq *send_cq, *recv_cq;
	struct mlx5_qp_context *context;
	struct mlx5_modify_qp_mbox_in *in;
	struct mlx5_ib_pd *pd;
	enum mlx5_qp_state mlx5_cur, mlx5_new;
	enum mlx5_qp_optpar optpar;
	int sqd_event;
	int mlx5_st;
	int err;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	context = &in->ctx;
	err = to_mlx5_st(ibqp->qp_type);
	if (err < 0)
		goto out;

	context->flags = cpu_to_be32(err << 16);

	if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
		context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
	} else {
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			context->flags |= cpu_to_be32(MLX5_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			context->flags |= cpu_to_be32(MLX5_QP_PM_ARMED << 11);
			break;
		}
	}

	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
		context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
	} else if (ibqp->qp_type == IB_QPT_UD ||
		   ibqp->qp_type == MLX5_IB_QPT_REG_UMR) {
		context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
	} else if (attr_mask & IB_QP_PATH_MTU) {
		if (attr->path_mtu < IB_MTU_256 ||
		    attr->path_mtu > IB_MTU_4096) {
			mlx5_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu);
			err = -EINVAL;
			goto out;
		}
		context->mtu_msgmax = (attr->path_mtu << 5) | dev->mdev.caps.log_max_msg;
	}

	if (attr_mask & IB_QP_DEST_QPN)
		context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);

	if (attr_mask & IB_QP_PKEY_INDEX)
		context->pri_path.pkey_index = attr->pkey_index;

	/* todo implement counter_index functionality */

	if (is_sqp(ibqp->qp_type))
		context->pri_path.port = qp->port;

	if (attr_mask & IB_QP_PORT)
		context->pri_path.port = attr->port_num;

	if (attr_mask & IB_QP_AV) {
		err = mlx5_set_path(dev, &attr->ah_attr, &context->pri_path,
				    attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
				    attr_mask, 0, attr);
		if (err)
			goto out;
	}

	if (attr_mask & IB_QP_TIMEOUT)
		context->pri_path.ackto_lt |= attr->timeout << 3;

	if (attr_mask & IB_QP_ALT_PATH) {
		err = mlx5_set_path(dev, &attr->alt_ah_attr, &context->alt_path,
				    attr->alt_port_num, attr_mask, 0, attr);
		if (err)
			goto out;
	}

	pd = get_pd(qp);
	get_cqs(qp, &send_cq, &recv_cq);

	context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
	context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0;
	context->cqn_recv = recv_cq ? cpu_to_be32(recv_cq->mcq.cqn) : 0;
	context->params1 = cpu_to_be32(MLX5_IB_ACK_REQ_FREQ << 28);

	if (attr_mask & IB_QP_RNR_RETRY)
		context->params1 |= cpu_to_be32(attr->rnr_retry << 13);

	if (attr_mask & IB_QP_RETRY_CNT)
		context->params1 |= cpu_to_be32(attr->retry_cnt << 16);

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic)
			context->params1 |=
				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
	}

	if (attr_mask & IB_QP_SQ_PSN)
		context->next_send_psn = cpu_to_be32(attr->sq_psn);

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic)
			context->params2 |=
				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
		context->params2 |= to_mlx5_access_flags(qp, attr, attr_mask);

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);

	if (attr_mask & IB_QP_RQ_PSN)
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	if (attr_mask & IB_QP_QKEY)
		context->qkey = cpu_to_be32(attr->qkey);

	if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->db_rec_addr = cpu_to_be64(qp->db.dma);

	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
		sqd_event = 1;
	else
		sqd_event = 0;

	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->sq_crq_size |= cpu_to_be16(1 << 4);

	mlx5_cur = to_mlx5_state(cur_state);
	mlx5_new = to_mlx5_state(new_state);
	mlx5_st = to_mlx5_st(ibqp->qp_type);
	if (mlx5_cur < 0 || mlx5_new < 0 || mlx5_st < 0)
		goto out;

	optpar = ib_mask_to_mlx5_opt(attr_mask);
	optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
	in->optparam = cpu_to_be32(optpar);
	err = mlx5_core_qp_modify(&dev->mdev, to_mlx5_state(cur_state),
				  to_mlx5_state(new_state), in, sqd_event,
				  &qp->mqp);
	if (err)
		goto out;

	qp->state = new_state;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->atomic_rd_en = attr->qp_access_flags;
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT)
		qp->port = attr->port_num;
	if (attr_mask & IB_QP_ALT_PATH)
		qp->alt_port = attr->alt_port_num;

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (new_state == IB_QPS_RESET && !ibqp->uobject) {
		mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
				 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
		if (send_cq != recv_cq)
			mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);

		qp->sq.cur_post = 0;
		qp->sq.last_poll = 0;
		qp->db.db[MLX5_RCV_DBR] = 0;
		qp->db.db[MLX5_SND_DBR] = 0;
	}

out:
	kfree(in);
	return err;
}
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	int err = -EINVAL;
	int port;

	mutex_lock(&qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (ibqp->qp_type != MLX5_IB_QPT_REG_UMR &&
	    !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask))
		goto out;

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > dev->mdev.caps.num_ports))
		goto out;

	if (attr_mask & IB_QP_PKEY_INDEX) {
		port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
		if (attr->pkey_index >= dev->mdev.caps.port[port - 1].pkey_table_len)
			goto out;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > dev->mdev.caps.max_ra_res_qp)
		goto out;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > dev->mdev.caps.max_ra_req_qp)
		goto out;

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		err = 0;
		goto out;
	}

	err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);

out:
	mutex_unlock(&qp->mutex);
	return err;
}
static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
{
	struct mlx5_ib_cq *cq;
	unsigned cur;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max_post))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max_post;
}
static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->raddr = cpu_to_be64(remote_addr);
	rseg->rkey = cpu_to_be32(rkey);
}

static void set_atomic_seg(struct mlx5_wqe_atomic_seg *aseg, struct ib_send_wr *wr)
{
	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
		aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
	} else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
		aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add_mask);
	} else {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
	}
}

static void set_masked_atomic_seg(struct mlx5_wqe_masked_atomic_seg *aseg,
				  struct ib_send_wr *wr)
{
	aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
	aseg->swap_add_mask = cpu_to_be64(wr->wr.atomic.swap_mask);
	aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
	aseg->compare_mask = cpu_to_be64(wr->wr.atomic.compare_add_mask);
}

static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
			     struct ib_send_wr *wr)
{
	memcpy(&dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof(struct mlx5_av));
	dseg->av.dqp_dct = cpu_to_be32(wr->wr.ud.remote_qpn | MLX5_EXTENDED_UD_AV);
	dseg->av.key.qkey.qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
}

static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->byte_count = cpu_to_be32(sg->length);
	dseg->lkey = cpu_to_be32(sg->lkey);
	dseg->addr = cpu_to_be64(sg->addr);
}
static __be16 get_klm_octo(int npages)
{
	return cpu_to_be16(ALIGN(npages, 8) / 2);
}

static __be64 frwr_mkey_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_LEN |
		 MLX5_MKEY_MASK_PAGE_SIZE |
		 MLX5_MKEY_MASK_START_ADDR |
		 MLX5_MKEY_MASK_EN_RINVAL |
		 MLX5_MKEY_MASK_KEY |
		 MLX5_MKEY_MASK_SMALL_FENCE |
		 MLX5_MKEY_MASK_FREE;

	return cpu_to_be64(result);
}

static void set_frwr_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
				 struct ib_send_wr *wr, int li)
{
	memset(umr, 0, sizeof(*umr));

	if (li) {
		umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
		umr->flags = 1 << 7;
		return;
	}

	umr->flags = (1 << 5); /* fail if not free */
	umr->klm_octowords = get_klm_octo(wr->wr.fast_reg.page_list_len);
	umr->mkey_mask = frwr_mkey_mask();
}

static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
				struct ib_send_wr *wr)
{
	struct umr_wr *umrwr = (struct umr_wr *)&wr->wr.fast_reg;
	u64 mask;

	memset(umr, 0, sizeof(*umr));

	if (!(wr->send_flags & MLX5_IB_SEND_UMR_UNREG)) {
		umr->flags = 1 << 5; /* fail if not free */
		umr->klm_octowords = get_klm_octo(umrwr->npages);
		mask = MLX5_MKEY_MASK_LEN |
		       MLX5_MKEY_MASK_PAGE_SIZE |
		       MLX5_MKEY_MASK_START_ADDR |
		       MLX5_MKEY_MASK_FREE;
		umr->mkey_mask = cpu_to_be64(mask);
	} else {
		umr->flags = 2 << 5; /* fail if free */
		mask = MLX5_MKEY_MASK_FREE;
		umr->mkey_mask = cpu_to_be64(mask);
	}

	if (!wr->num_sge)
		umr->flags |= (1 << 7); /* inline */
}
static u8 get_umr_flags(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) |
	       MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN | MLX5_ACCESS_MODE_MTT;
}

static void set_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr,
			     int li, int *writ)
{
	memset(seg, 0, sizeof(*seg));
	if (li) {
		seg->status = 1 << 6;
		return;
	}

	seg->flags = get_umr_flags(wr->wr.fast_reg.access_flags);
	*writ = seg->flags & (MLX5_PERM_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE);
	seg->qpn_mkey7_0 = cpu_to_be32((wr->wr.fast_reg.rkey & 0xff) | 0xffffff00);
	seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
	seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
	seg->len = cpu_to_be64(wr->wr.fast_reg.length);
	seg->xlt_oct_size = cpu_to_be32((wr->wr.fast_reg.page_list_len + 1) / 2);
	seg->log2_page_size = wr->wr.fast_reg.page_shift;
}

static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr)
{
	memset(seg, 0, sizeof(*seg));
	if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) {
		seg->status = 1 << 6;
		return;
	}

	seg->flags = convert_access(wr->wr.fast_reg.access_flags);
	seg->flags_pd = cpu_to_be32(to_mpd((struct ib_pd *)wr->wr.fast_reg.page_list)->pdn);
	seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
	seg->len = cpu_to_be64(wr->wr.fast_reg.length);
	seg->log2_page_size = wr->wr.fast_reg.page_shift;
	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
}

static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg,
			   struct ib_send_wr *wr,
			   struct mlx5_core_dev *mdev,
			   struct mlx5_ib_pd *pd,
			   int writ)
{
	struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list);
	u64 *page_list = wr->wr.fast_reg.page_list->page_list;
	u64 perm = MLX5_EN_RD | (writ ? MLX5_EN_WR : 0);
	int i;

	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++)
		mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm);
	dseg->addr = cpu_to_be64(mfrpl->map);
	dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64));
	dseg->lkey = cpu_to_be32(pd->pa_lkey);
}
static __be32 send_ieth(struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return wr->ex.imm_data;

	case IB_WR_SEND_WITH_INV:
		return cpu_to_be32(wr->ex.invalidate_rkey);

	default:
		return 0;
	}
}

static u8 calc_sig(void *wqe, int size)
{
	u8 *p = wqe;
	u8 res = 0;
	int i;

	for (i = 0; i < size; i++)
		res ^= p[i];

	return ~res;
}

static u8 wq_sig(void *wqe)
{
	return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
}

static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr,
			    void *wqe, int *sz)
{
	struct mlx5_wqe_inline_seg *seg;
	void *qend = qp->sq.qend;
	void *addr;
	int inl = 0;
	int copy;
	int len;
	int i;

	seg = wqe;
	wqe += sizeof(*seg);
	for (i = 0; i < wr->num_sge; i++) {
		addr = (void *)(unsigned long)(wr->sg_list[i].addr);
		len = wr->sg_list[i].length;
		inl += len;

		if (unlikely(inl > qp->max_inline_data))
			return -ENOMEM;

		if (unlikely(wqe + len > qend)) {
			copy = qend - wqe;
			memcpy(wqe, addr, copy);
			addr += copy;
			len -= copy;
			wqe = mlx5_get_send_wqe(qp, 0);
		}
		memcpy(wqe, addr, len);
		wqe += len;
	}

	seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);

	*sz = ALIGN(inl + sizeof(seg->byte_count), 16) / 16;

	return 0;
}
static int set_frwr_li_wr(void **seg, struct ib_send_wr *wr, int *size,
			  struct mlx5_core_dev *mdev, struct mlx5_ib_pd *pd, struct mlx5_ib_qp *qp)
{
	int writ = 0;
	int li;

	li = wr->opcode == IB_WR_LOCAL_INV ? 1 : 0;
	if (unlikely(wr->send_flags & IB_SEND_INLINE))
		return -EINVAL;

	set_frwr_umr_segment(*seg, wr, li);
	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);
	set_mkey_segment(*seg, wr, li, &writ);
	*seg += sizeof(struct mlx5_mkey_seg);
	*size += sizeof(struct mlx5_mkey_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	if (!li) {
		set_frwr_pages(*seg, wr, mdev, pd, writ);
		*seg += sizeof(struct mlx5_wqe_data_seg);
		*size += (sizeof(struct mlx5_wqe_data_seg) / 16);
	}

	return 0;
}
static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
{
	__be32 *p = NULL;
	int tidx = idx;
	int i, j;

	pr_debug("dump wqe at %p\n", mlx5_get_send_wqe(qp, tidx));
	for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
		if ((i & 0xf) == 0) {
			void *buf = mlx5_get_send_wqe(qp, tidx);
			tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1);
			p = buf;
			j = 0;
		}
		pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
			 be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
			 be32_to_cpu(p[j + 3]));
	}
}

static void mlx5_bf_copy(u64 __iomem *dst, u64 *src,
			 unsigned bytecnt, struct mlx5_ib_qp *qp)
{
	while (bytecnt > 0) {
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		bytecnt -= 64;
		if (unlikely(src == qp->sq.qend))
			src = mlx5_get_send_wqe(qp, 0);
	}
}

static u8 get_fence(u8 fence, struct ib_send_wr *wr)
{
	if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
		     wr->send_flags & IB_SEND_FENCE))
		return MLX5_FENCE_MODE_STRONG_ORDERING;

	if (unlikely(fence)) {
		if (wr->send_flags & IB_SEND_FENCE)
			return MLX5_FENCE_MODE_SMALL_AND_FENCE;
		else
			return fence;
	} else {
		return 0;
	}
}
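
/*
 * Post-send flow: for every work request reserve the next send WQE
 * slot, build the control segment followed by the transport and
 * opcode specific segments and the data segments, then either copy
 * the WQE to the blue-flame buffer or ring the regular doorbell after
 * a memory barrier.
 */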
int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr)
{
	struct mlx5_wqe_ctrl_seg *ctrl = NULL;	/* compiler warning */
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_core_dev *mdev = &dev->mdev;
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_wqe_data_seg *dpseg;
	struct mlx5_wqe_xrc_seg *xrc;
	struct mlx5_bf *bf = qp->bf;
	int uninitialized_var(size);
	void *qend = qp->sq.qend;
	unsigned long flags;
	u32 mlx5_opcode;
	unsigned idx;
	void *seg;
	int err = 0;
	int inl = 0;
	int num_sge;
	int nreq;
	int i;
	u8 next_fence = 0;
	u8 opmod = 0;
	u8 fence;

	spin_lock_irqsave(&qp->sq.lock, flags);

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		if (unlikely(wr->opcode >= sizeof(mlx5_ib_opcode) / sizeof(mlx5_ib_opcode[0]))) {
			mlx5_ib_warn(dev, "\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) {
			mlx5_ib_warn(dev, "\n");
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		fence = qp->fm_cache;
		num_sge = wr->num_sge;
		if (unlikely(num_sge > qp->sq.max_gs)) {
			mlx5_ib_warn(dev, "\n");
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
		seg = mlx5_get_send_wqe(qp, idx);
		ctrl = seg;
		*(uint32_t *)(seg + 8) = 0;
		ctrl->imm = send_ieth(wr);
		ctrl->fm_ce_se = qp->sq_signal_bits |
			(wr->send_flags & IB_SEND_SIGNALED ?
			 MLX5_WQE_CTRL_CQ_UPDATE : 0) |
			(wr->send_flags & IB_SEND_SOLICITED ?
			 MLX5_WQE_CTRL_SOLICITED : 0);

		seg += sizeof(*ctrl);
		size = sizeof(*ctrl) / 16;

		switch (ibqp->qp_type) {
		case IB_QPT_XRC_INI:
			xrc = seg;
			xrc->xrc_srqn = htonl(wr->xrc_remote_srq_num);
			seg += sizeof(*xrc);
			size += sizeof(*xrc) / 16;
			/* fall through */
		case IB_QPT_RC:
			switch (wr->opcode) {
			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(seg, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				seg += sizeof(struct mlx5_wqe_raddr_seg);
				size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
				break;

			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				set_raddr_seg(seg, wr->wr.atomic.remote_addr,
					      wr->wr.atomic.rkey);
				seg += sizeof(struct mlx5_wqe_raddr_seg);

				set_atomic_seg(seg, wr);
				seg += sizeof(struct mlx5_wqe_atomic_seg);

				size += (sizeof(struct mlx5_wqe_raddr_seg) +
					 sizeof(struct mlx5_wqe_atomic_seg)) / 16;
				break;

			case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
				set_raddr_seg(seg, wr->wr.atomic.remote_addr,
					      wr->wr.atomic.rkey);
				seg += sizeof(struct mlx5_wqe_raddr_seg);

				set_masked_atomic_seg(seg, wr);
				seg += sizeof(struct mlx5_wqe_masked_atomic_seg);

				size += (sizeof(struct mlx5_wqe_raddr_seg) +
					 sizeof(struct mlx5_wqe_masked_atomic_seg)) / 16;
				break;

			case IB_WR_LOCAL_INV:
				next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
				qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
				ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
				err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					*bad_wr = wr;
					goto out;
				}
				break;

			case IB_WR_FAST_REG_MR:
				next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
				qp->sq.wr_data[idx] = IB_WR_FAST_REG_MR;
				ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey);
				err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					*bad_wr = wr;
					goto out;
				}
				break;

			default:
				break;
			}
			break;

		case IB_QPT_UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(seg, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				seg += sizeof(struct mlx5_wqe_raddr_seg);
				size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
				break;

			default:
				break;
			}
			break;

		case IB_QPT_UD:
		case IB_QPT_SMI:
		case IB_QPT_GSI:
			set_datagram_seg(seg, wr);
			seg += sizeof(struct mlx5_wqe_datagram_seg);
			size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
			if (unlikely((seg == qend)))
				seg = mlx5_get_send_wqe(qp, 0);
			break;

		case MLX5_IB_QPT_REG_UMR:
			if (wr->opcode != MLX5_IB_WR_UMR) {
				err = -EINVAL;
				mlx5_ib_warn(dev, "bad opcode\n");
				goto out;
			}
			qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
			ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey);
			set_reg_umr_segment(seg, wr);
			seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
			size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
			if (unlikely((seg == qend)))
				seg = mlx5_get_send_wqe(qp, 0);
			set_reg_mkey_segment(seg, wr);
			seg += sizeof(struct mlx5_mkey_seg);
			size += sizeof(struct mlx5_mkey_seg) / 16;
			if (unlikely((seg == qend)))
				seg = mlx5_get_send_wqe(qp, 0);
			break;

		default:
			break;
		}

		if (wr->send_flags & IB_SEND_INLINE && num_sge) {
			int uninitialized_var(sz);

			err = set_data_inl_seg(qp, wr, seg, &sz);
			if (unlikely(err)) {
				mlx5_ib_warn(dev, "\n");
				*bad_wr = wr;
				goto out;
			}
			inl = 1;
			size += sz;
		} else {
			dpseg = seg;
			for (i = 0; i < num_sge; i++) {
				if (unlikely(dpseg == qend)) {
					seg = mlx5_get_send_wqe(qp, 0);
					dpseg = seg;
				}
				if (likely(wr->sg_list[i].length)) {
					set_data_ptr_seg(dpseg, wr->sg_list + i);
					size += sizeof(struct mlx5_wqe_data_seg) / 16;
					dpseg++;
				}
			}
		}

		mlx5_opcode = mlx5_ib_opcode[wr->opcode];
		ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
						     mlx5_opcode |
						     ((u32)opmod << 24));
		ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8));
		ctrl->fm_ce_se |= get_fence(fence, wr);
		qp->fm_cache = next_fence;
		if (unlikely(qp->wq_sig))
			ctrl->signature = wq_sig(ctrl);

		qp->sq.wrid[idx] = wr->wr_id;
		qp->sq.w_list[idx].opcode = mlx5_opcode;
		qp->sq.wqe_head[idx] = qp->sq.head + nreq;
		qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
		qp->sq.w_list[idx].next = qp->sq.cur_post;

		dump_wqe(qp, idx, size);
	}

out:
	if (likely(nreq)) {
		qp->sq.head += nreq;

		/* Make sure that descriptors are written before
		 * updating doorbell record and ringing the doorbell
		 */
		wmb();

		qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);

		spin_lock(&bf->lock);

		if (0 && nreq == 1 && bf->uuarn && inl && size > 1 && size <= bf->buf_size / 16) {
			mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl, ALIGN(size * 16, 64), qp);
		} else {
			mlx5_write64((__be32 *)ctrl, bf->regreg + bf->offset,
				     MLX5_GET_DOORBELL_LOCK(&bf->lock32));
			/* Make sure doorbells don't leak out of SQ spinlock
			 * and reach the HCA out of order.
			 */
			mmiowb();
		}
		bf->offset ^= bf->buf_size;

		spin_unlock(&bf->lock);
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return err;
}
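
/*
 * Receive posting is simpler: each receive WQE is a list of scatter
 * entries, terminated with an invalid-lkey entry when fewer than
 * max_gs SGEs are used, and carries a signature segment when wq_sig
 * is enabled.
 */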
static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)
{
	sig->signature = calc_sig(sig, size);
}

int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_wqe_data_seg *scat;
	struct mlx5_rwqe_sig *sig;
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int i;

	spin_lock_irqsave(&qp->rq.lock, flags);

	ind = qp->rq.head & (qp->rq.wqe_cnt - 1);

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		scat = get_recv_wqe(qp, ind);

		for (i = 0; i < wr->num_sge; i++)
			set_data_ptr_seg(scat + i, wr->sg_list + i);

		if (i < qp->rq.max_gs) {
			scat[i].byte_count = 0;
			scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
		}

		if (qp->wq_sig) {
			sig = (struct mlx5_rwqe_sig *)scat;
			set_sig_seg(sig, (qp->rq.max_gs + 1) << 2);
		}

		qp->rq.wrid[ind] = wr->wr_id;

		ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
	}

out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/* Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);

	return err;
}
static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
{
	switch (mlx5_state) {
	case MLX5_QP_STATE_RST:      return IB_QPS_RESET;
	case MLX5_QP_STATE_INIT:     return IB_QPS_INIT;
	case MLX5_QP_STATE_RTR:      return IB_QPS_RTR;
	case MLX5_QP_STATE_RTS:      return IB_QPS_RTS;
	case MLX5_QP_STATE_SQ_DRAINING:
	case MLX5_QP_STATE_SQD:      return IB_QPS_SQD;
	case MLX5_QP_STATE_SQER:     return IB_QPS_SQE;
	case MLX5_QP_STATE_ERR:      return IB_QPS_ERR;
	default:		     return -1;
	}
}

static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state)
{
	switch (mlx5_mig_state) {
	case MLX5_QP_PM_ARMED:	  return IB_MIG_ARMED;
	case MLX5_QP_PM_REARM:	  return IB_MIG_REARM;
	case MLX5_QP_PM_MIGRATED: return IB_MIG_MIGRATED;
	default:		  return -1;
	}
}

static int to_ib_qp_access_flags(int mlx5_flags)
{
	int ib_flags = 0;

	if (mlx5_flags & MLX5_QP_BIT_RRE)
		ib_flags |= IB_ACCESS_REMOTE_READ;
	if (mlx5_flags & MLX5_QP_BIT_RWE)
		ib_flags |= IB_ACCESS_REMOTE_WRITE;
	if (mlx5_flags & MLX5_QP_BIT_RAE)
		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

	return ib_flags;
}

static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
			  struct mlx5_qp_path *path)
{
	struct mlx5_core_dev *dev = &ibdev->mdev;

	memset(ib_ah_attr, 0, sizeof(*ib_ah_attr));
	ib_ah_attr->port_num = path->port;

	if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports)
		return;

	ib_ah_attr->sl = path->sl & 0xf;

	ib_ah_attr->dlid = be16_to_cpu(path->rlid);
	ib_ah_attr->src_path_bits = path->grh_mlid & 0x7f;
	ib_ah_attr->static_rate = path->static_rate ? path->static_rate - 5 : 0;
	ib_ah_attr->ah_flags = (path->grh_mlid & (1 << 7)) ? IB_AH_GRH : 0;
	if (ib_ah_attr->ah_flags) {
		ib_ah_attr->grh.sgid_index = path->mgid_index;
		ib_ah_attr->grh.hop_limit = path->hop_limit;
		ib_ah_attr->grh.traffic_class =
			(be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
		ib_ah_attr->grh.flow_label =
			be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
		memcpy(ib_ah_attr->grh.dgid.raw,
		       path->rgid, sizeof(ib_ah_attr->grh.dgid.raw));
	}
}
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_query_qp_mbox_out *outb;
	struct mlx5_qp_context *context;
	int mlx5_state;
	int err = 0;

	mutex_lock(&qp->mutex);
	outb = kzalloc(sizeof(*outb), GFP_KERNEL);
	if (!outb) {
		err = -ENOMEM;
		goto out;
	}

	context = &outb->ctx;
	err = mlx5_core_qp_query(&dev->mdev, &qp->mqp, outb, sizeof(*outb));
	if (err)
		goto out_free;

	mlx5_state = be32_to_cpu(context->flags) >> 28;

	qp->state = to_ib_qp_state(mlx5_state);
	qp_attr->qp_state = qp->state;
	qp_attr->path_mtu = context->mtu_msgmax >> 5;
	qp_attr->path_mig_state =
		to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
	qp_attr->qkey = be32_to_cpu(context->qkey);
	qp_attr->rq_psn = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
	qp_attr->sq_psn = be32_to_cpu(context->next_send_psn) & 0xffffff;
	qp_attr->dest_qp_num = be32_to_cpu(context->log_pg_sz_remote_qpn) & 0xffffff;
	qp_attr->qp_access_flags =
		to_ib_qp_access_flags(be32_to_cpu(context->params2));

	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
		to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
		qp_attr->alt_pkey_index = context->alt_path.pkey_index & 0x7f;
		qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
	}

	qp_attr->pkey_index = context->pri_path.pkey_index & 0x7f;
	qp_attr->port_num = context->pri_path.port;

	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
	qp_attr->sq_draining = mlx5_state == MLX5_QP_STATE_SQ_DRAINING;

	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);

	qp_attr->max_dest_rd_atomic =
		1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
	qp_attr->min_rnr_timer =
		(be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
	qp_attr->timeout = context->pri_path.ackto_lt >> 3;
	qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7;
	qp_attr->rnr_retry = (be32_to_cpu(context->params1) >> 13) & 0x7;
	qp_attr->alt_timeout = context->alt_path.ackto_lt >> 3;
	qp_attr->cur_qp_state = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge = qp->rq.max_gs;

	if (!ibqp->uobject) {
		qp_attr->cap.max_send_wr = qp->sq.wqe_cnt;
		qp_attr->cap.max_send_sge = qp->sq.max_gs;
	} else {
		qp_attr->cap.max_send_wr = 0;
		qp_attr->cap.max_send_sge = 0;
	}

	/* We don't support inline sends for kernel QPs (yet), and we
	 * don't know what userspace's value should be.
	 */
	qp_attr->cap.max_inline_data = 0;

	qp_init_attr->cap = qp_attr->cap;

	qp_init_attr->create_flags = 0;
	if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
		qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;

	qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
		IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;

out_free:
	kfree(outb);

out:
	mutex_unlock(&qp->mutex);
	return err;
}
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_xrcd *xrcd;
	int err;

	if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_XRC))
		return ERR_PTR(-ENOSYS);

	xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
	if (!xrcd)
		return ERR_PTR(-ENOMEM);

	err = mlx5_core_xrcd_alloc(&dev->mdev, &xrcd->xrcdn);
	if (err) {
		kfree(xrcd);
		return ERR_PTR(-ENOMEM);
	}

	return &xrcd->ibxrcd;
}

int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
	u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
	int err;

	err = mlx5_core_xrcd_dealloc(&dev->mdev, xrcdn);
	if (err) {
		mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
		return err;
	}

	return 0;
}