/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/log2.h>
#include <linux/etherdevice.h>
#include <net/ip.h>
#include <linux/slab.h>
#include <linux/netdevice.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/qp.h>

#include "mlx4_ib.h"
#include <rdma/mlx4-abi.h>
static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq,
			     struct mlx4_ib_cq *recv_cq);
static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq,
			       struct mlx4_ib_cq *recv_cq);
static int _mlx4_ib_modify_wq(struct ib_wq *ibwq, enum ib_wq_state new_state);
enum {
	MLX4_IB_ACK_REQ_FREQ	= 8,
};

enum {
	MLX4_IB_DEFAULT_SCHED_QUEUE	= 0x83,
	MLX4_IB_DEFAULT_QP0_SCHED_QUEUE	= 0x3f,
	MLX4_IB_LINK_TYPE_IB		= 0,
	MLX4_IB_LINK_TYPE_ETH		= 1
};
enum {
	/*
	 * Largest possible UD header: send with GRH and immediate
	 * data plus 18 bytes for an Ethernet header with VLAN/802.1Q
	 * tag.  (LRH would only use 8 bytes, so Ethernet is the
	 * biggest case)
	 */
	MLX4_IB_UD_HEADER_SIZE		= 82,
	MLX4_IB_LSO_HEADER_SPARE	= 128,
};
struct mlx4_ib_sqp {
	struct mlx4_ib_qp	qp;
	int			pkey_index;
	u32			qkey;
	u32			send_psn;
	struct ib_ud_header	ud_header;
	u8			header_buf[MLX4_IB_UD_HEADER_SIZE];
	struct ib_qp		*roce_v2_gsi;
};
enum {
	MLX4_IB_MIN_SQ_STRIDE	= 6,
	MLX4_IB_CACHE_LINE_SIZE	= 64,
};
enum {
	MLX4_RAW_QP_MTU		= 7,
	MLX4_RAW_QP_MSGMAX	= 31,
};
static const __be32 mlx4_ib_opcode[] = {
	[IB_WR_SEND]				= cpu_to_be32(MLX4_OPCODE_SEND),
	[IB_WR_LSO]				= cpu_to_be32(MLX4_OPCODE_LSO),
	[IB_WR_SEND_WITH_IMM]			= cpu_to_be32(MLX4_OPCODE_SEND_IMM),
	[IB_WR_RDMA_WRITE]			= cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
	[IB_WR_RDMA_WRITE_WITH_IMM]		= cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
	[IB_WR_RDMA_READ]			= cpu_to_be32(MLX4_OPCODE_RDMA_READ),
	[IB_WR_ATOMIC_CMP_AND_SWP]		= cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
	[IB_WR_ATOMIC_FETCH_AND_ADD]		= cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
	[IB_WR_SEND_WITH_INV]			= cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
	[IB_WR_LOCAL_INV]			= cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
	[IB_WR_REG_MR]				= cpu_to_be32(MLX4_OPCODE_FMR),
	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP]	= cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS),
	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]	= cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA),
};
enum mlx4_ib_source_type {
	MLX4_IB_QP_SRC	= 0,
	MLX4_IB_RWQ_SRC	= 1,
};
static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
{
	return container_of(mqp, struct mlx4_ib_sqp, qp);
}
static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	if (!mlx4_is_master(dev->dev))
		return 0;

	return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn &&
	       qp->mqp.qpn <  dev->dev->phys_caps.base_tunnel_sqpn +
	       8 * MLX4_MFUNC_MAX;
}
static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	int real_sqp;
	int i;

	/* PPF or Native -- real SQP */
	real_sqp = ((mlx4_is_master(dev->dev) || !mlx4_is_mfunc(dev->dev)) &&
		    qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&
		    qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3);
	if (real_sqp)
		return 1;
	/* VF or PF -- proxy SQP */
	if (mlx4_is_mfunc(dev->dev)) {
		for (i = 0; i < dev->dev->caps.num_ports; i++) {
			if (qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp0_proxy ||
			    qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp1_proxy) {
				real_sqp = 1;
				break;
			}
		}
	}
	if (real_sqp)
		return 1;

	return !!(qp->flags & MLX4_IB_ROCE_V2_GSI_QP);
}
/* used for INIT/CLOSE port logic */
static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	int real_qp0;
	int i;

	/* PPF or Native -- real QP0 */
	real_qp0 = ((mlx4_is_master(dev->dev) || !mlx4_is_mfunc(dev->dev)) &&
		    qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&
		    qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 1);
	if (real_qp0)
		return 1;

	/* VF or PF -- proxy QP0 */
	if (mlx4_is_mfunc(dev->dev)) {
		for (i = 0; i < dev->dev->caps.num_ports; i++) {
			if (qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp0_proxy) {
				real_qp0 = 1;
				break;
			}
		}
	}
	return real_qp0;
}
static void *get_wqe(struct mlx4_ib_qp *qp, int offset)
{
	return mlx4_buf_offset(&qp->buf, offset);
}
static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
}
static void *get_send_wqe(struct mlx4_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift));
}
/*
 * Stamp a SQ WQE so that it is invalid if prefetched by marking the
 * first four bytes of every 64 byte chunk with
 *     0x7FFFFFF | (invalid_ownership_value << 31).
 *
 * When the max work request size is less than or equal to the WQE
 * basic block size, as an optimization, we can stamp all WQEs with
 * 0xffffffff, and skip the very first chunk of each WQE.
 */
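/*
 * For example, with a 64-byte basic block a 256-byte WQE spans four
 * chunks, and stamping overwrites the first dword of each chunk so a
 * stale prefetched descriptor is never interpreted as valid work.  The
 * 0x7fffffff vs. 0xffffffff choice keeps the ownership bit (bit 31)
 * "wrong" for whichever pass of the ring the chunk belongs to.
 */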
static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size)
{
	__be32 stamp;
	int i;
	int s;
	int ind;
	void *buf;
	__be32 *wqe;
	struct mlx4_wqe_ctrl_seg *ctrl;

	if (qp->sq_max_wqes_per_wr > 1) {
		s = roundup(size, 1U << qp->sq.wqe_shift);
		for (i = 0; i < s; i += 64) {
			ind = (i >> qp->sq.wqe_shift) + n;
			stamp = ind & qp->sq.wqe_cnt ? cpu_to_be32(0x7fffffff) :
						       cpu_to_be32(0xffffffff);
			buf = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
			wqe = buf + (i & ((1 << qp->sq.wqe_shift) - 1));
			*wqe = stamp;
		}
	} else {
		ctrl = buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
		s = (ctrl->qpn_vlan.fence_size & 0x3f) << 4;
		for (i = 64; i < s; i += 64) {
			wqe = buf + i;
			*wqe = cpu_to_be32(0xffffffff);
		}
	}
}
static void post_nop_wqe(struct mlx4_ib_qp *qp, int n, int size)
{
	struct mlx4_wqe_ctrl_seg *ctrl;
	struct mlx4_wqe_inline_seg *inl;
	void *wqe;
	int s;

	ctrl = wqe = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
	s = sizeof(struct mlx4_wqe_ctrl_seg);

	if (qp->ibqp.qp_type == IB_QPT_UD) {
		struct mlx4_wqe_datagram_seg *dgram = wqe + sizeof *ctrl;
		struct mlx4_av *av = (struct mlx4_av *)dgram->av;
		memset(dgram, 0, sizeof *dgram);
		av->port_pd = cpu_to_be32((qp->port << 24) | to_mpd(qp->ibqp.pd)->pdn);
		s += sizeof(struct mlx4_wqe_datagram_seg);
	}

	/* Pad the remainder of the WQE with an inline data segment. */
	if (size > s) {
		inl = wqe + s;
		inl->byte_count = cpu_to_be32(1 << 31 | (size - s - sizeof *inl));
	}
	ctrl->srcrb_flags = 0;
	ctrl->qpn_vlan.fence_size = size / 16;
	/*
	 * Make sure descriptor is fully written before setting ownership bit
	 * (because HW can start executing as soon as we do).
	 */
	wmb();

	ctrl->owner_opcode = cpu_to_be32(MLX4_OPCODE_NOP | MLX4_WQE_CTRL_NEC) |
		(n & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0);

	stamp_send_wqe(qp, n + qp->sq_spare_wqes, size);
}
/* Post NOP WQE to prevent wrap-around in the middle of WR */
static inline unsigned pad_wraparound(struct mlx4_ib_qp *qp, int ind)
{
	unsigned s = qp->sq.wqe_cnt - (ind & (qp->sq.wqe_cnt - 1));
	if (unlikely(s < qp->sq_max_wqes_per_wr)) {
		post_nop_wqe(qp, ind, s << qp->sq.wqe_shift);
		ind += s;
	}
	return ind;
}
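/*
 * Example: with wqe_cnt = 256 and sq_max_wqes_per_wr = 4, an index of
 * 254 leaves only two basic blocks before the wrap, so a two-block NOP
 * is posted and the next WR starts cleanly at slot 0 instead of
 * straddling the end of the ring.
 */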
static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;

	if (type == MLX4_EVENT_TYPE_PATH_MIG)
		to_mibqp(qp)->port = to_mibqp(qp)->alt_port;

	if (ibqp->event_handler) {
		event.device     = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case MLX4_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case MLX4_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case MLX4_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			pr_warn("Unexpected event type %d "
				"on QP %06x\n", type, qp->qpn);
			return;
		}

		ibqp->event_handler(&event, ibqp->qp_context);
	}
}
static void mlx4_ib_wq_event(struct mlx4_qp *qp, enum mlx4_event type)
{
	pr_warn_ratelimited("Unexpected event type %d on WQ 0x%06x. Events are not supported for WQs\n",
			    type, qp->qpn);
}
static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags)
{
	/*
	 * UD WQEs must have a datagram segment.
	 * RC and UC WQEs might have a remote address segment.
	 * MLX WQEs need two extra inline data segments (for the UD
	 * header and space for the ICRC).
	 */
	switch (type) {
	case MLX4_IB_QPT_UD:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_datagram_seg) +
			((flags & MLX4_IB_QP_LSO) ? MLX4_IB_LSO_HEADER_SPARE : 0);
	case MLX4_IB_QPT_PROXY_SMI_OWNER:
	case MLX4_IB_QPT_PROXY_SMI:
	case MLX4_IB_QPT_PROXY_GSI:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_datagram_seg) + 64;
	case MLX4_IB_QPT_TUN_SMI_OWNER:
	case MLX4_IB_QPT_TUN_GSI:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_datagram_seg);

	case MLX4_IB_QPT_UC:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_raddr_seg);
	case MLX4_IB_QPT_RC:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_masked_atomic_seg) +
			sizeof (struct mlx4_wqe_raddr_seg);
	case MLX4_IB_QPT_SMI:
	case MLX4_IB_QPT_GSI:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			ALIGN(MLX4_IB_UD_HEADER_SIZE +
			      DIV_ROUND_UP(MLX4_IB_UD_HEADER_SIZE,
					   MLX4_INLINE_ALIGN) *
			      sizeof (struct mlx4_wqe_inline_seg),
			      sizeof (struct mlx4_wqe_data_seg)) +
			ALIGN(4 +
			      sizeof (struct mlx4_wqe_inline_seg),
			      sizeof (struct mlx4_wqe_data_seg));
	default:
		return sizeof (struct mlx4_wqe_ctrl_seg);
	}
}
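/*
 * For instance, a plain UD WQE pays for the control segment plus the
 * datagram (address vector) segment, while enabling LSO additionally
 * reserves MLX4_IB_LSO_HEADER_SPARE (128) bytes for inlined headers;
 * MLX (SMI/GSI) QPs carry the largest overhead because the whole UD
 * header is built in software and sent inline.
 */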
static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
		       int is_user, int has_rq, struct mlx4_ib_qp *qp,
		       u32 inl_recv_sz)
{
	int wqe_size;

	/* Sanity check RQ size before proceeding */
	if (cap->max_recv_wr > dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE ||
	    cap->max_recv_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg))
		return -EINVAL;

	if (!has_rq) {
		if (cap->max_recv_wr || inl_recv_sz)
			return -EINVAL;

		qp->rq.wqe_cnt = qp->rq.max_gs = 0;
	} else {
		u32 max_inl_recv_sz = dev->dev->caps.max_rq_sg *
			sizeof(struct mlx4_wqe_data_seg);

		/* HW requires >= 1 RQ entry with >= 1 gather entry */
		if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge ||
				inl_recv_sz > max_inl_recv_sz))
			return -EINVAL;

		qp->rq.wqe_cnt	 = roundup_pow_of_two(max(1U, cap->max_recv_wr));
		qp->rq.max_gs	 = roundup_pow_of_two(max(1U, cap->max_recv_sge));
		wqe_size = qp->rq.max_gs * sizeof(struct mlx4_wqe_data_seg);
		qp->rq.wqe_shift = ilog2(max_t(u32, wqe_size, inl_recv_sz));
	}

	/* leave userspace return values as they were, so as not to break ABI */
	if (is_user) {
		cap->max_recv_wr  = qp->rq.max_post = qp->rq.wqe_cnt;
		cap->max_recv_sge = qp->rq.max_gs;
	} else {
		cap->max_recv_wr  = qp->rq.max_post =
			min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt);
		cap->max_recv_sge = min(qp->rq.max_gs,
					min(dev->dev->caps.max_sq_sg,
					    dev->dev->caps.max_rq_sg));
	}

	return 0;
}
*dev
, struct ib_qp_cap
*cap
,
436 enum mlx4_ib_qp_type type
, struct mlx4_ib_qp
*qp
,
441 /* Sanity check SQ size before proceeding */
442 if (cap
->max_send_wr
> (dev
->dev
->caps
.max_wqes
- MLX4_IB_SQ_MAX_SPARE
) ||
443 cap
->max_send_sge
> min(dev
->dev
->caps
.max_sq_sg
, dev
->dev
->caps
.max_rq_sg
) ||
444 cap
->max_inline_data
+ send_wqe_overhead(type
, qp
->flags
) +
445 sizeof (struct mlx4_wqe_inline_seg
) > dev
->dev
->caps
.max_sq_desc_sz
)
449 * For MLX transport we need 2 extra S/G entries:
450 * one for the header and one for the checksum at the end
452 if ((type
== MLX4_IB_QPT_SMI
|| type
== MLX4_IB_QPT_GSI
||
453 type
& (MLX4_IB_QPT_PROXY_SMI_OWNER
| MLX4_IB_QPT_TUN_SMI_OWNER
)) &&
454 cap
->max_send_sge
+ 2 > dev
->dev
->caps
.max_sq_sg
)
457 s
= max(cap
->max_send_sge
* sizeof (struct mlx4_wqe_data_seg
),
458 cap
->max_inline_data
+ sizeof (struct mlx4_wqe_inline_seg
)) +
459 send_wqe_overhead(type
, qp
->flags
);
461 if (s
> dev
->dev
->caps
.max_sq_desc_sz
)
465 * Hermon supports shrinking WQEs, such that a single work
466 * request can include multiple units of 1 << wqe_shift. This
467 * way, work requests can differ in size, and do not have to
468 * be a power of 2 in size, saving memory and speeding up send
469 * WR posting. Unfortunately, if we do this then the
470 * wqe_index field in CQEs can't be used to look up the WR ID
471 * anymore, so we do this only if selective signaling is off.
473 * Further, on 32-bit platforms, we can't use vmap() to make
474 * the QP buffer virtually contiguous. Thus we have to use
475 * constant-sized WRs to make sure a WR is always fully within
476 * a single page-sized chunk.
478 * Finally, we use NOP work requests to pad the end of the
479 * work queue, to avoid wrap-around in the middle of WR. We
480 * set NEC bit to avoid getting completions with error for
481 * these NOP WRs, but since NEC is only supported starting
482 * with firmware 2.2.232, we use constant-sized WRs for older
485 * And, since MLX QPs only support SEND, we use constant-sized
488 * We look for the smallest value of wqe_shift such that the
489 * resulting number of wqes does not exceed device
492 * We set WQE size to at least 64 bytes, this way stamping
493 * invalidates each WQE.
495 if (shrink_wqe
&& dev
->dev
->caps
.fw_ver
>= MLX4_FW_VER_WQE_CTRL_NEC
&&
496 qp
->sq_signal_bits
&& BITS_PER_LONG
== 64 &&
497 type
!= MLX4_IB_QPT_SMI
&& type
!= MLX4_IB_QPT_GSI
&&
498 !(type
& (MLX4_IB_QPT_PROXY_SMI_OWNER
| MLX4_IB_QPT_PROXY_SMI
|
499 MLX4_IB_QPT_PROXY_GSI
| MLX4_IB_QPT_TUN_SMI_OWNER
)))
500 qp
->sq
.wqe_shift
= ilog2(64);
502 qp
->sq
.wqe_shift
= ilog2(roundup_pow_of_two(s
));
505 qp
->sq_max_wqes_per_wr
= DIV_ROUND_UP(s
, 1U << qp
->sq
.wqe_shift
);
508 * We need to leave 2 KB + 1 WR of headroom in the SQ to
509 * allow HW to prefetch.
511 qp
->sq_spare_wqes
= (2048 >> qp
->sq
.wqe_shift
) + qp
->sq_max_wqes_per_wr
;
512 qp
->sq
.wqe_cnt
= roundup_pow_of_two(cap
->max_send_wr
*
513 qp
->sq_max_wqes_per_wr
+
516 if (qp
->sq
.wqe_cnt
<= dev
->dev
->caps
.max_wqes
)
519 if (qp
->sq_max_wqes_per_wr
<= 1)
525 qp
->sq
.max_gs
= (min(dev
->dev
->caps
.max_sq_desc_sz
,
526 (qp
->sq_max_wqes_per_wr
<< qp
->sq
.wqe_shift
)) -
527 send_wqe_overhead(type
, qp
->flags
)) /
528 sizeof (struct mlx4_wqe_data_seg
);
530 qp
->buf_size
= (qp
->rq
.wqe_cnt
<< qp
->rq
.wqe_shift
) +
531 (qp
->sq
.wqe_cnt
<< qp
->sq
.wqe_shift
);
532 if (qp
->rq
.wqe_shift
> qp
->sq
.wqe_shift
) {
534 qp
->sq
.offset
= qp
->rq
.wqe_cnt
<< qp
->rq
.wqe_shift
;
536 qp
->rq
.offset
= qp
->sq
.wqe_cnt
<< qp
->sq
.wqe_shift
;
540 cap
->max_send_wr
= qp
->sq
.max_post
=
541 (qp
->sq
.wqe_cnt
- qp
->sq_spare_wqes
) / qp
->sq_max_wqes_per_wr
;
542 cap
->max_send_sge
= min(qp
->sq
.max_gs
,
543 min(dev
->dev
->caps
.max_sq_sg
,
544 dev
->dev
->caps
.max_rq_sg
));
545 /* We don't support inline sends for kernel QPs (yet) */
546 cap
->max_inline_data
= 0;
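/*
 * Worked example of the shrinking-WQE path: for a maximal WR of
 * s = 200 bytes with wqe_shift = ilog2(64), sq_max_wqes_per_wr =
 * DIV_ROUND_UP(200, 64) = 4 basic blocks, and sq_spare_wqes =
 * (2048 >> 6) + 4 = 36 blocks of prefetch headroom are added before
 * wqe_cnt is rounded up to a power of two.  If the result exceeds
 * max_wqes, wqe_shift is bumped and the loop retries with fewer,
 * larger basic blocks.
 */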
static int set_user_sq_size(struct mlx4_ib_dev *dev,
			    struct mlx4_ib_qp *qp,
			    struct mlx4_ib_create_qp *ucmd)
{
	/* Sanity check SQ size before proceeding */
	if ((1 << ucmd->log_sq_bb_count) > dev->dev->caps.max_wqes	 ||
	    ucmd->log_sq_stride >
		ilog2(roundup_pow_of_two(dev->dev->caps.max_sq_desc_sz)) ||
	    ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE)
		return -EINVAL;

	qp->sq.wqe_cnt   = 1 << ucmd->log_sq_bb_count;
	qp->sq.wqe_shift = ucmd->log_sq_stride;

	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
		(qp->sq.wqe_cnt << qp->sq.wqe_shift);

	return 0;
}
static int alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
{
	int i;

	qp->sqp_proxy_rcv =
		kmalloc_array(qp->rq.wqe_cnt, sizeof(struct mlx4_ib_buf),
			      GFP_KERNEL);
	if (!qp->sqp_proxy_rcv)
		return -ENOMEM;
	for (i = 0; i < qp->rq.wqe_cnt; i++) {
		qp->sqp_proxy_rcv[i].addr =
			kmalloc(sizeof (struct mlx4_ib_proxy_sqp_hdr),
				GFP_KERNEL);
		if (!qp->sqp_proxy_rcv[i].addr)
			goto err;
		qp->sqp_proxy_rcv[i].map =
			ib_dma_map_single(dev, qp->sqp_proxy_rcv[i].addr,
					  sizeof (struct mlx4_ib_proxy_sqp_hdr),
					  DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(dev, qp->sqp_proxy_rcv[i].map)) {
			kfree(qp->sqp_proxy_rcv[i].addr);
			goto err;
		}
	}
	return 0;

err:
	while (i > 0) {
		--i;
		ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map,
				    sizeof (struct mlx4_ib_proxy_sqp_hdr),
				    DMA_FROM_DEVICE);
		kfree(qp->sqp_proxy_rcv[i].addr);
	}
	kfree(qp->sqp_proxy_rcv);
	qp->sqp_proxy_rcv = NULL;
	return -ENOMEM;
}
static void free_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
{
	int i;

	for (i = 0; i < qp->rq.wqe_cnt; i++) {
		ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map,
				    sizeof (struct mlx4_ib_proxy_sqp_hdr),
				    DMA_FROM_DEVICE);
		kfree(qp->sqp_proxy_rcv[i].addr);
	}
	kfree(qp->sqp_proxy_rcv);
}
static int qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI || attr->qp_type == IB_QPT_XRC_TGT)
		return 0;

	return !attr->srq;
}
static int qp0_enabled_vf(struct mlx4_dev *dev, int qpn)
{
	int i;
	for (i = 0; i < dev->caps.num_ports; i++) {
		if (qpn == dev->caps.spec_qps[i].qp0_proxy)
			return !!dev->caps.spec_qps[i].qp0_qkey;
	}
	return 0;
}
static void mlx4_ib_free_qp_counter(struct mlx4_ib_dev *dev,
				    struct mlx4_ib_qp *qp)
{
	mutex_lock(&dev->counters_table[qp->port - 1].mutex);
	mlx4_counter_free(dev->dev, qp->counter_index->index);
	list_del(&qp->counter_index->list);
	mutex_unlock(&dev->counters_table[qp->port - 1].mutex);

	kfree(qp->counter_index);
	qp->counter_index = NULL;
}
static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
		      struct ib_qp_init_attr *init_attr,
		      struct mlx4_ib_create_qp_rss *ucmd)
{
	rss_ctx->base_qpn_tbl_sz = init_attr->rwq_ind_tbl->ind_tbl[0]->wq_num |
		(init_attr->rwq_ind_tbl->log_ind_tbl_size << 24);

	if ((ucmd->rx_hash_function == MLX4_IB_RX_HASH_FUNC_TOEPLITZ) &&
	    (dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)) {
		memcpy(rss_ctx->rss_key, ucmd->rx_hash_key,
		       MLX4_EN_RSS_KEY_SIZE);
	} else {
		pr_debug("RX Hash function is not supported\n");
		return (-EOPNOTSUPP);
	}

	if (ucmd->rx_hash_fields_mask & ~(MLX4_IB_RX_HASH_SRC_IPV4	|
					  MLX4_IB_RX_HASH_DST_IPV4	|
					  MLX4_IB_RX_HASH_SRC_IPV6	|
					  MLX4_IB_RX_HASH_DST_IPV6	|
					  MLX4_IB_RX_HASH_SRC_PORT_TCP	|
					  MLX4_IB_RX_HASH_DST_PORT_TCP	|
					  MLX4_IB_RX_HASH_SRC_PORT_UDP	|
					  MLX4_IB_RX_HASH_DST_PORT_UDP  |
					  MLX4_IB_RX_HASH_INNER)) {
		pr_debug("RX Hash fields_mask has unsupported mask (0x%llx)\n",
			 ucmd->rx_hash_fields_mask);
		return (-EOPNOTSUPP);
	}

	if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV4) &&
	    (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV4)) {
		rss_ctx->flags = MLX4_RSS_IPV4;
	} else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV4) ||
		   (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV4)) {
		pr_debug("RX Hash fields_mask is not supported - both IPv4 SRC and DST must be set\n");
		return (-EOPNOTSUPP);
	}

	if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV6) &&
	    (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV6)) {
		rss_ctx->flags |= MLX4_RSS_IPV6;
	} else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV6) ||
		   (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV6)) {
		pr_debug("RX Hash fields_mask is not supported - both IPv6 SRC and DST must be set\n");
		return (-EOPNOTSUPP);
	}

	if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_UDP) &&
	    (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_UDP)) {
		if (!(dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UDP_RSS)) {
			pr_debug("RX Hash fields_mask for UDP is not supported\n");
			return (-EOPNOTSUPP);
		}

		if (rss_ctx->flags & MLX4_RSS_IPV4)
			rss_ctx->flags |= MLX4_RSS_UDP_IPV4;
		if (rss_ctx->flags & MLX4_RSS_IPV6)
			rss_ctx->flags |= MLX4_RSS_UDP_IPV6;
		if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) {
			pr_debug("RX Hash fields_mask is not supported - UDP must be set with IPv4 or IPv6\n");
			return (-EOPNOTSUPP);
		}
	} else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_UDP) ||
		   (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_UDP)) {
		pr_debug("RX Hash fields_mask is not supported - both UDP SRC and DST must be set\n");
		return (-EOPNOTSUPP);
	}

	if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) &&
	    (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
		if (rss_ctx->flags & MLX4_RSS_IPV4)
			rss_ctx->flags |= MLX4_RSS_TCP_IPV4;
		if (rss_ctx->flags & MLX4_RSS_IPV6)
			rss_ctx->flags |= MLX4_RSS_TCP_IPV6;
		if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) {
			pr_debug("RX Hash fields_mask is not supported - TCP must be set with IPv4 or IPv6\n");
			return (-EOPNOTSUPP);
		}
	} else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) ||
		   (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
		pr_debug("RX Hash fields_mask is not supported - both TCP SRC and DST must be set\n");
		return (-EOPNOTSUPP);
	}

	if (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_INNER) {
		if (dev->dev->caps.tunnel_offload_mode ==
		    MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
			/*
			 * Hash according to inner headers if exist, otherwise
			 * according to outer headers.
			 */
			rss_ctx->flags |= MLX4_RSS_BY_INNER_HEADERS_IPONLY;
		} else {
			pr_debug("RSS Hash for inner headers isn't supported\n");
			return (-EOPNOTSUPP);
		}
	}

	return 0;
}
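/*
 * In short, the accepted fields_mask combinations are: IPv4 and/or IPv6
 * with both SRC and DST set, optionally extended by a full TCP or UDP
 * port pair (UDP additionally requires MLX4_DEV_CAP_FLAG_UDP_RSS), plus
 * MLX4_IB_RX_HASH_INNER when VXLAN tunnel offload is active.
 */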
static int create_qp_rss(struct mlx4_ib_dev *dev,
			 struct ib_qp_init_attr *init_attr,
			 struct mlx4_ib_create_qp_rss *ucmd,
			 struct mlx4_ib_qp *qp)
{
	int qpn;
	int err;

	qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS;

	err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn, 0, qp->mqp.usage);
	if (err)
		return err;

	err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
	if (err)
		goto err_qpn;

	mutex_init(&qp->mutex);

	INIT_LIST_HEAD(&qp->gid_list);
	INIT_LIST_HEAD(&qp->steering_rules);

	qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_PACKET;
	qp->state = IB_QPS_RESET;

	/* Set dummy send resources to be compatible with HV and PRM */
	qp->sq_no_prefetch = 1;
	qp->sq.wqe_cnt = 1;
	qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE;
	qp->buf_size = qp->sq.wqe_cnt << MLX4_IB_MIN_SQ_STRIDE;
	qp->mtt = (to_mqp(
		   (struct ib_qp *)init_attr->rwq_ind_tbl->ind_tbl[0]))->mtt;

	qp->rss_ctx = kzalloc(sizeof(*qp->rss_ctx), GFP_KERNEL);
	if (!qp->rss_ctx) {
		err = -ENOMEM;
		goto err_qp_alloc;
	}

	err = set_qp_rss(dev, qp->rss_ctx, init_attr, ucmd);
	if (err)
		goto err;

	return 0;

err:
	kfree(qp->rss_ctx);

err_qp_alloc:
	mlx4_qp_remove(dev->dev, &qp->mqp);
	mlx4_qp_free(dev->dev, &qp->mqp);

err_qpn:
	mlx4_qp_release_range(dev->dev, qpn, 1);
	return err;
}
static struct ib_qp *_mlx4_ib_create_qp_rss(struct ib_pd *pd,
					    struct ib_qp_init_attr *init_attr,
					    struct ib_udata *udata)
{
	struct mlx4_ib_qp *qp;
	struct mlx4_ib_create_qp_rss ucmd = {};
	size_t required_cmd_sz;
	int err;

	if (!udata) {
		pr_debug("RSS QP with NULL udata\n");
		return ERR_PTR(-EINVAL);
	}

	if (udata->outlen)
		return ERR_PTR(-EOPNOTSUPP);

	required_cmd_sz = offsetof(typeof(ucmd), reserved1) +
					sizeof(ucmd.reserved1);
	if (udata->inlen < required_cmd_sz) {
		pr_debug("invalid inlen\n");
		return ERR_PTR(-EINVAL);
	}

	if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
		pr_debug("copy failed\n");
		return ERR_PTR(-EFAULT);
	}

	if (memchr_inv(ucmd.reserved, 0, sizeof(ucmd.reserved)))
		return ERR_PTR(-EOPNOTSUPP);

	if (ucmd.comp_mask || ucmd.reserved1)
		return ERR_PTR(-EOPNOTSUPP);

	if (udata->inlen > sizeof(ucmd) &&
	    !ib_is_udata_cleared(udata, sizeof(ucmd),
				 udata->inlen - sizeof(ucmd))) {
		pr_debug("inlen is not supported\n");
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
		pr_debug("RSS QP with unsupported QP type %d\n",
			 init_attr->qp_type);
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (init_attr->create_flags) {
		pr_debug("RSS QP doesn't support create flags\n");
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (init_attr->send_cq || init_attr->cap.max_send_wr) {
		pr_debug("RSS QP with unsupported send attributes\n");
		return ERR_PTR(-EOPNOTSUPP);
	}

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->pri.vid = 0xFFFF;
	qp->alt.vid = 0xFFFF;

	err = create_qp_rss(to_mdev(pd->device), init_attr, &ucmd, qp);
	if (err) {
		kfree(qp);
		return ERR_PTR(err);
	}

	qp->ibqp.qp_num = qp->mqp.qpn;

	return &qp->ibqp;
}
/*
 * This function allocates a WQN from a range which is consecutive and aligned
 * to its size. In case the range is full, then it creates a new range and
 * allocates WQN from it. The new range will be used for following allocations.
 */
static int mlx4_ib_alloc_wqn(struct mlx4_ib_ucontext *context,
			     struct mlx4_ib_qp *qp, int range_size, int *wqn)
{
	struct mlx4_ib_dev *dev = to_mdev(context->ibucontext.device);
	struct mlx4_wqn_range *range;
	int err = 0;

	mutex_lock(&context->wqn_ranges_mutex);

	range = list_first_entry_or_null(&context->wqn_ranges_list,
					 struct mlx4_wqn_range, list);

	if (!range || (range->refcount == range->size) || range->dirty) {
		range = kzalloc(sizeof(*range), GFP_KERNEL);
		if (!range) {
			err = -ENOMEM;
			goto out;
		}

		err = mlx4_qp_reserve_range(dev->dev, range_size,
					    range_size, &range->base_wqn, 0,
					    qp->mqp.usage);
		if (err) {
			kfree(range);
			goto out;
		}

		range->size = range_size;
		list_add(&range->list, &context->wqn_ranges_list);
	} else if (range_size != 1) {
		/*
		 * Requesting a new range (>1) when last range is still open, is
		 * not valid.
		 */
		err = -EINVAL;
		goto out;
	}

	qp->wqn_range = range;

	*wqn = range->base_wqn + range->refcount;

	range->refcount++;

out:
	mutex_unlock(&context->wqn_ranges_mutex);

	return err;
}
static void mlx4_ib_release_wqn(struct mlx4_ib_ucontext *context,
				struct mlx4_ib_qp *qp, bool dirty_release)
{
	struct mlx4_ib_dev *dev = to_mdev(context->ibucontext.device);
	struct mlx4_wqn_range *range;

	mutex_lock(&context->wqn_ranges_mutex);

	range = qp->wqn_range;

	range->refcount--;
	if (!range->refcount) {
		mlx4_qp_release_range(dev->dev, range->base_wqn,
				      range->size);
		list_del(&range->list);
		kfree(range);
	} else if (dirty_release) {
		/*
		 * A range which one of its WQNs is destroyed, won't be able to be
		 * reused for further WQN allocations.
		 * The next created WQ will allocate a new range.
		 */
		range->dirty = 1;
	}

	mutex_unlock(&context->wqn_ranges_mutex);
}
static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
			    enum mlx4_ib_source_type src,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata, int sqpn,
			    struct mlx4_ib_qp **caller_qp)
{
	int qpn;
	int err;
	struct ib_qp_cap backup_cap;
	struct mlx4_ib_sqp *sqp = NULL;
	struct mlx4_ib_qp *qp;
	enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;
	struct mlx4_ib_cq *mcq;
	unsigned long flags;
	int range_size = 0;

	/* When tunneling special qps, we use a plain UD qp */
	if (sqpn) {
		if (mlx4_is_mfunc(dev->dev) &&
		    (!mlx4_is_master(dev->dev) ||
		     !(init_attr->create_flags & MLX4_IB_SRIOV_SQP))) {
			if (init_attr->qp_type == IB_QPT_GSI)
				qp_type = MLX4_IB_QPT_PROXY_GSI;
			else {
				if (mlx4_is_master(dev->dev) ||
				    qp0_enabled_vf(dev->dev, sqpn))
					qp_type = MLX4_IB_QPT_PROXY_SMI_OWNER;
				else
					qp_type = MLX4_IB_QPT_PROXY_SMI;
			}
		}
		qpn = sqpn;
		/* add extra sg entry for tunneling */
		init_attr->cap.max_recv_sge++;
	} else if (init_attr->create_flags & MLX4_IB_SRIOV_TUNNEL_QP) {
		struct mlx4_ib_qp_tunnel_init_attr *tnl_init =
			container_of(init_attr,
				     struct mlx4_ib_qp_tunnel_init_attr, init_attr);
		if ((tnl_init->proxy_qp_type != IB_QPT_SMI &&
		     tnl_init->proxy_qp_type != IB_QPT_GSI)   ||
		    !mlx4_is_master(dev->dev))
			return -EINVAL;
		if (tnl_init->proxy_qp_type == IB_QPT_GSI)
			qp_type = MLX4_IB_QPT_TUN_GSI;
		else if (tnl_init->slave == mlx4_master_func_num(dev->dev) ||
			 mlx4_vf_smi_enabled(dev->dev, tnl_init->slave,
					     tnl_init->port))
			qp_type = MLX4_IB_QPT_TUN_SMI_OWNER;
		else
			qp_type = MLX4_IB_QPT_TUN_SMI;
		/* we are definitely in the PPF here, since we are creating
		 * tunnel QPs. base_tunnel_sqpn is therefore valid. */
		qpn = dev->dev->phys_caps.base_tunnel_sqpn + 8 * tnl_init->slave
			+ tnl_init->proxy_qp_type * 2 + tnl_init->port - 1;
		sqpn = qpn;
	}

	if (!*caller_qp) {
		if (qp_type == MLX4_IB_QPT_SMI || qp_type == MLX4_IB_QPT_GSI ||
		    (qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER |
				MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) {
			sqp = kzalloc(sizeof(struct mlx4_ib_sqp), GFP_KERNEL);
			if (!sqp)
				return -ENOMEM;
			qp = &sqp->qp;
			qp->pri.vid = 0xFFFF;
			qp->alt.vid = 0xFFFF;
		} else {
			qp = kzalloc(sizeof(struct mlx4_ib_qp), GFP_KERNEL);
			if (!qp)
				return -ENOMEM;
			qp->pri.vid = 0xFFFF;
			qp->alt.vid = 0xFFFF;
		}
	} else
		qp = *caller_qp;

	qp->mlx4_ib_qp_type = qp_type;

	mutex_init(&qp->mutex);
	spin_lock_init(&qp->sq.lock);
	spin_lock_init(&qp->rq.lock);
	INIT_LIST_HEAD(&qp->gid_list);
	INIT_LIST_HEAD(&qp->steering_rules);

	qp->state = IB_QPS_RESET;
	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);

	if (pd->uobject) {
		union {
			struct mlx4_ib_create_qp qp;
			struct mlx4_ib_create_wq wq;
		} ucmd;
		size_t copy_len;
		int shift;
		int n;

		copy_len = (src == MLX4_IB_QP_SRC) ?
			   sizeof(struct mlx4_ib_create_qp) :
			   min(sizeof(struct mlx4_ib_create_wq), udata->inlen);

		if (ib_copy_from_udata(&ucmd, udata, copy_len)) {
			err = -EFAULT;
			goto err;
		}

		if (src == MLX4_IB_RWQ_SRC) {
			if (ucmd.wq.comp_mask || ucmd.wq.reserved[0] ||
			    ucmd.wq.reserved[1] || ucmd.wq.reserved[2]) {
				pr_debug("user command isn't supported\n");
				err = -EOPNOTSUPP;
				goto err;
			}

			if (ucmd.wq.log_range_size >
			    ilog2(dev->dev->caps.max_rss_tbl_sz)) {
				pr_debug("WQN range size must be equal or smaller than %d\n",
					 dev->dev->caps.max_rss_tbl_sz);
				err = -EOPNOTSUPP;
				goto err;
			}
			range_size = 1 << ucmd.wq.log_range_size;
		} else {
			qp->inl_recv_sz = ucmd.qp.inl_recv_sz;
		}

		if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS) {
			if (!(dev->dev->caps.flags &
			      MLX4_DEV_CAP_FLAG_FCS_KEEP)) {
				pr_debug("scatter FCS is unsupported\n");
				err = -EOPNOTSUPP;
				goto err;
			}

			qp->flags |= MLX4_IB_QP_SCATTER_FCS;
		}

		err = set_rq_size(dev, &init_attr->cap, !!pd->uobject,
				  qp_has_rq(init_attr), qp, qp->inl_recv_sz);
		if (err)
			goto err;

		if (src == MLX4_IB_QP_SRC) {
			qp->sq_no_prefetch = ucmd.qp.sq_no_prefetch;

			err = set_user_sq_size(dev, qp,
					       (struct mlx4_ib_create_qp *)
					       &ucmd);
			if (err)
				goto err;
		} else {
			qp->sq_no_prefetch = 1;
			qp->sq.wqe_cnt = 1;
			qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE;
			/* Allocated buffer expects to have at least that SQ
			 * size.
			 */
			qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
				(qp->sq.wqe_cnt << qp->sq.wqe_shift);
		}

		qp->umem = ib_umem_get(pd->uobject->context,
				(src == MLX4_IB_QP_SRC) ? ucmd.qp.buf_addr :
				ucmd.wq.buf_addr, qp->buf_size, 0, 0);
		if (IS_ERR(qp->umem)) {
			err = PTR_ERR(qp->umem);
			goto err;
		}

		n = ib_umem_page_count(qp->umem);
		shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
		err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);

		if (err)
			goto err_buf;

		err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem);
		if (err)
			goto err_mtt;

		if (qp_has_rq(init_attr)) {
			err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
				(src == MLX4_IB_QP_SRC) ? ucmd.qp.db_addr :
				ucmd.wq.db_addr, &qp->db);
			if (err)
				goto err_mtt;
		}
		qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS;
	} else {
		err = set_rq_size(dev, &init_attr->cap, !!pd->uobject,
				  qp_has_rq(init_attr), qp, 0);
		if (err)
			goto err;

		qp->sq_no_prefetch = 0;

		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
			qp->flags |= MLX4_IB_QP_LSO;

		if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
			if (dev->steering_support ==
			    MLX4_STEERING_MODE_DEVICE_MANAGED)
				qp->flags |= MLX4_IB_QP_NETIF;
			else {
				err = -EINVAL;
				goto err;
			}
		}

		memcpy(&backup_cap, &init_attr->cap, sizeof(backup_cap));
		err = set_kernel_sq_size(dev, &init_attr->cap,
					 qp_type, qp, true);
		if (err)
			goto err;

		if (qp_has_rq(init_attr)) {
			err = mlx4_db_alloc(dev->dev, &qp->db, 0);
			if (err)
				goto err;

			*qp->db.db = 0;
		}

		if (mlx4_buf_alloc(dev->dev, qp->buf_size, qp->buf_size,
				   &qp->buf)) {
			memcpy(&init_attr->cap, &backup_cap,
			       sizeof(backup_cap));
			err = set_kernel_sq_size(dev, &init_attr->cap, qp_type,
						 qp, false);
			if (err)
				goto err_db;

			if (mlx4_buf_alloc(dev->dev, qp->buf_size,
					   PAGE_SIZE * 2, &qp->buf)) {
				err = -ENOMEM;
				goto err_db;
			}
		}

		err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift,
				    &qp->mtt);
		if (err)
			goto err_buf;

		err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf);
		if (err)
			goto err_mtt;

		qp->sq.wrid = kvmalloc_array(qp->sq.wqe_cnt,
					     sizeof(u64), GFP_KERNEL);
		qp->rq.wrid = kvmalloc_array(qp->rq.wqe_cnt,
					     sizeof(u64), GFP_KERNEL);
		if (!qp->sq.wrid || !qp->rq.wrid) {
			err = -ENOMEM;
			goto err_wrid;
		}
		qp->mqp.usage = MLX4_RES_USAGE_DRIVER;
	}

	if (sqpn) {
		if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
		    MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
			if (alloc_proxy_bufs(pd->device, qp)) {
				err = -ENOMEM;
				goto err_wrid;
			}
		}
	} else if (src == MLX4_IB_RWQ_SRC) {
		err = mlx4_ib_alloc_wqn(to_mucontext(pd->uobject->context), qp,
					range_size, &qpn);
		if (err)
			goto err_wrid;
	} else {
		/* Raw packet QPNs may not have bits 6,7 set in their qp_num;
		 * otherwise, the WQE BlueFlame setup flow wrongly causes
		 * VLAN insertion. */
		if (init_attr->qp_type == IB_QPT_RAW_PACKET)
			err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn,
						    (init_attr->cap.max_send_wr ?
						     MLX4_RESERVE_ETH_BF_QP : 0) |
						    (init_attr->cap.max_recv_wr ?
						     MLX4_RESERVE_A0_QP : 0),
						    qp->mqp.usage);
		else
			if (qp->flags & MLX4_IB_QP_NETIF)
				err = mlx4_ib_steer_qp_alloc(dev, 1, &qpn);
			else
				err = mlx4_qp_reserve_range(dev->dev, 1, 1,
							    &qpn, 0, qp->mqp.usage);
		if (err)
			goto err_proxy;
	}

	if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
		qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;

	err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
	if (err)
		goto err_qpn;

	if (init_attr->qp_type == IB_QPT_XRC_TGT)
		qp->mqp.qpn |= (1 << 23);
	/*
	 * Hardware wants QPN written in big-endian order (after
	 * shifting) for send doorbell.  Precompute this value to save
	 * a little bit when posting sends.
	 */
	qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
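	/*
	 * E.g. QPN 0x000abc: shifted left it is 0x000abc00, and swab32()
	 * on a little-endian host stores it as the bytes 00 0a bc 00 --
	 * the big-endian layout the doorbell register expects -- so each
	 * post is a plain 32-bit write with no per-send byte swapping.
	 */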
	qp->mqp.event = (src == MLX4_IB_QP_SRC) ? mlx4_ib_qp_event :
						  mlx4_ib_wq_event;

	if (!*caller_qp)
		*caller_qp = qp;

	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
	mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),
			 to_mcq(init_attr->recv_cq));
	/* Maintain device to QPs access, needed for further handling
	 * via reset flow
	 */
	list_add_tail(&qp->qps_list, &dev->qp_list);
	/* Maintain CQ to QPs access, needed for further handling
	 * via reset flow
	 */
	mcq = to_mcq(init_attr->send_cq);
	list_add_tail(&qp->cq_send_list, &mcq->send_qp_list);
	mcq = to_mcq(init_attr->recv_cq);
	list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list);
	mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq),
			   to_mcq(init_attr->recv_cq));
	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
	return 0;

err_qpn:
	if (!sqpn) {
		if (qp->flags & MLX4_IB_QP_NETIF)
			mlx4_ib_steer_qp_free(dev, qpn, 1);
		else if (src == MLX4_IB_RWQ_SRC)
			mlx4_ib_release_wqn(to_mucontext(pd->uobject->context),
					    qp, 0);
		else
			mlx4_qp_release_range(dev->dev, qpn, 1);
	}
err_proxy:
	if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
		free_proxy_bufs(pd->device, qp);
err_wrid:
	if (pd->uobject) {
		if (qp_has_rq(init_attr))
			mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db);
	} else {
		kvfree(qp->sq.wrid);
		kvfree(qp->rq.wrid);
	}

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &qp->mtt);

err_buf:
	if (pd->uobject)
		ib_umem_release(qp->umem);
	else
		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);

err_db:
	if (!pd->uobject && qp_has_rq(init_attr))
		mlx4_db_free(dev->dev, &qp->db);

err:
	if (sqp)
		kfree(sqp);
	else if (!*caller_qp)
		kfree(qp);
	return err;
}
static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:	return MLX4_QP_STATE_RST;
	case IB_QPS_INIT:	return MLX4_QP_STATE_INIT;
	case IB_QPS_RTR:	return MLX4_QP_STATE_RTR;
	case IB_QPS_RTS:	return MLX4_QP_STATE_RTS;
	case IB_QPS_SQD:	return MLX4_QP_STATE_SQD;
	case IB_QPS_SQE:	return MLX4_QP_STATE_SQER;
	case IB_QPS_ERR:	return MLX4_QP_STATE_ERR;
	default:		return -1;
	}
}
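/*
 * The two CQ locks below are always taken in ascending CQN order (and
 * released symmetrically), so any pair of paths locking the same
 * send/recv CQs nest them identically and an ABBA deadlock cannot
 * occur.
 */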
static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		spin_lock(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
		spin_lock(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}
static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
	__releases(&send_cq->lock) __releases(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock(&send_cq->lock);
	} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock(&recv_cq->lock);
	}
}
static void del_gid_entries(struct mlx4_ib_qp *qp)
{
	struct mlx4_ib_gid_entry *ge, *tmp;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		list_del(&ge->list);
		kfree(ge);
	}
}
static struct mlx4_ib_pd *get_pd(struct mlx4_ib_qp *qp)
{
	if (qp->ibqp.qp_type == IB_QPT_XRC_TGT)
		return to_mpd(to_mxrcd(qp->ibqp.xrcd)->pd);
	else
		return to_mpd(qp->ibqp.pd);
}
static void get_cqs(struct mlx4_ib_qp *qp, enum mlx4_ib_source_type src,
		    struct mlx4_ib_cq **send_cq, struct mlx4_ib_cq **recv_cq)
{
	switch (qp->ibqp.qp_type) {
	case IB_QPT_XRC_TGT:
		*send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq);
		*recv_cq = *send_cq;
		break;
	case IB_QPT_XRC_INI:
		*send_cq = to_mcq(qp->ibqp.send_cq);
		*recv_cq = *send_cq;
		break;
	default:
		*recv_cq = (src == MLX4_IB_QP_SRC) ? to_mcq(qp->ibqp.recv_cq) :
						     to_mcq(qp->ibwq.cq);
		*send_cq = (src == MLX4_IB_QP_SRC) ? to_mcq(qp->ibqp.send_cq) :
						     *recv_cq;
		break;
	}
}
*dev
, struct mlx4_ib_qp
*qp
)
1435 if (qp
->state
!= IB_QPS_RESET
) {
1438 for (i
= 0; i
< (1 << qp
->ibqp
.rwq_ind_tbl
->log_ind_tbl_size
);
1440 struct ib_wq
*ibwq
= qp
->ibqp
.rwq_ind_tbl
->ind_tbl
[i
];
1441 struct mlx4_ib_qp
*wq
= to_mqp((struct ib_qp
*)ibwq
);
1443 mutex_lock(&wq
->mutex
);
1447 mutex_unlock(&wq
->mutex
);
1450 if (mlx4_qp_modify(dev
->dev
, NULL
, to_mlx4_state(qp
->state
),
1451 MLX4_QP_STATE_RST
, NULL
, 0, 0, &qp
->mqp
))
1452 pr_warn("modify QP %06x to RESET failed.\n",
1456 mlx4_qp_remove(dev
->dev
, &qp
->mqp
);
1457 mlx4_qp_free(dev
->dev
, &qp
->mqp
);
1458 mlx4_qp_release_range(dev
->dev
, qp
->mqp
.qpn
, 1);
1459 del_gid_entries(qp
);
static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
			      enum mlx4_ib_source_type src, int is_user)
{
	struct mlx4_ib_cq *send_cq, *recv_cq;
	unsigned long flags;

	if (qp->state != IB_QPS_RESET) {
		if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
				   MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
			pr_warn("modify QP %06x to RESET failed.\n",
				qp->mqp.qpn);
		if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) {
			mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
			qp->pri.smac = 0;
			qp->pri.smac_port = 0;
		}
		if (qp->alt.smac) {
			mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
			qp->alt.smac = 0;
		}
		if (qp->pri.vid < 0x1000) {
			mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid);
			qp->pri.vid = 0xFFFF;
			qp->pri.candidate_vid = 0xFFFF;
			qp->pri.update_vid = 0;
		}
		if (qp->alt.vid < 0x1000) {
			mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid);
			qp->alt.vid = 0xFFFF;
			qp->alt.candidate_vid = 0xFFFF;
			qp->alt.update_vid = 0;
		}
	}

	get_cqs(qp, src, &send_cq, &recv_cq);

	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
	mlx4_ib_lock_cqs(send_cq, recv_cq);

	/* del from lists under both locks above to protect reset flow paths */
	list_del(&qp->qps_list);
	list_del(&qp->cq_send_list);
	list_del(&qp->cq_recv_list);
	if (!is_user) {
		__mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
				 qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL);
		if (send_cq != recv_cq)
			__mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
	}

	mlx4_qp_remove(dev->dev, &qp->mqp);

	mlx4_ib_unlock_cqs(send_cq, recv_cq);
	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);

	mlx4_qp_free(dev->dev, &qp->mqp);

	if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp)) {
		if (qp->flags & MLX4_IB_QP_NETIF)
			mlx4_ib_steer_qp_free(dev, qp->mqp.qpn, 1);
		else if (src == MLX4_IB_RWQ_SRC)
			mlx4_ib_release_wqn(to_mucontext(
					    qp->ibwq.uobject->context), qp, 1);
		else
			mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
	}

	mlx4_mtt_cleanup(dev->dev, &qp->mtt);

	if (is_user) {
		if (qp->rq.wqe_cnt) {
			struct mlx4_ib_ucontext *mcontext = !src ?
				to_mucontext(qp->ibqp.uobject->context) :
				to_mucontext(qp->ibwq.uobject->context);
			mlx4_ib_db_unmap_user(mcontext, &qp->db);
		}
		ib_umem_release(qp->umem);
	} else {
		kvfree(qp->sq.wrid);
		kvfree(qp->rq.wrid);
		if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
		    MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
			free_proxy_bufs(&dev->ib_dev, qp);
		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
		if (qp->rq.wqe_cnt)
			mlx4_db_free(dev->dev, &qp->db);
	}

	del_gid_entries(qp);
}
static u32 get_sqp_num(struct mlx4_ib_dev *dev, struct ib_qp_init_attr *attr)
{
	/* Native or PPF */
	if (!mlx4_is_mfunc(dev->dev) ||
	    (mlx4_is_master(dev->dev) &&
	     attr->create_flags & MLX4_IB_SRIOV_SQP)) {
		return  dev->dev->phys_caps.base_sqpn +
			(attr->qp_type == IB_QPT_SMI ? 0 : 2) +
			attr->port_num - 1;
	}
	/* PF or VF -- creating proxies */
	if (attr->qp_type == IB_QPT_SMI)
		return dev->dev->caps.spec_qps[attr->port_num - 1].qp0_proxy;
	else
		return dev->dev->caps.spec_qps[attr->port_num - 1].qp1_proxy;
}
static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
					struct ib_qp_init_attr *init_attr,
					struct ib_udata *udata)
{
	struct mlx4_ib_qp *qp = NULL;
	int err;
	int sup_u_create_flags = MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
	u16 xrcdn = 0;

	if (init_attr->rwq_ind_tbl)
		return _mlx4_ib_create_qp_rss(pd, init_attr, udata);

	/*
	 * We only support LSO, vendor flag1, and multicast loopback blocking,
	 * and only for kernel UD QPs.
	 */
	if (init_attr->create_flags & ~(MLX4_IB_QP_LSO |
					MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK |
					MLX4_IB_SRIOV_TUNNEL_QP |
					MLX4_IB_SRIOV_SQP |
					MLX4_IB_QP_NETIF |
					MLX4_IB_QP_CREATE_ROCE_V2_GSI))
		return ERR_PTR(-EINVAL);

	if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
		if (init_attr->qp_type != IB_QPT_UD)
			return ERR_PTR(-EINVAL);
	}

	if (init_attr->create_flags) {
		if (udata && init_attr->create_flags & ~(sup_u_create_flags))
			return ERR_PTR(-EINVAL);

		if ((init_attr->create_flags & ~(MLX4_IB_SRIOV_SQP |
						 MLX4_IB_QP_CREATE_ROCE_V2_GSI  |
						 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) &&
		     init_attr->qp_type != IB_QPT_UD) ||
		    (init_attr->create_flags & MLX4_IB_SRIOV_SQP &&
		     init_attr->qp_type > IB_QPT_GSI) ||
		    (init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI &&
		     init_attr->qp_type != IB_QPT_GSI))
			return ERR_PTR(-EINVAL);
	}

	switch (init_attr->qp_type) {
	case IB_QPT_XRC_TGT:
		pd = to_mxrcd(init_attr->xrcd)->pd;
		xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
		init_attr->send_cq = to_mxrcd(init_attr->xrcd)->cq;
		/* fall through */
	case IB_QPT_XRC_INI:
		if (!(to_mdev(pd->device)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
			return ERR_PTR(-ENOSYS);
		init_attr->recv_cq = init_attr->send_cq;
		/* fall through */
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_RAW_PACKET:
		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);
		qp->pri.vid = 0xFFFF;
		qp->alt.vid = 0xFFFF;
		/* fall through */
	case IB_QPT_UD:
	{
		err = create_qp_common(to_mdev(pd->device), pd, MLX4_IB_QP_SRC,
				       init_attr, udata, 0, &qp);
		if (err) {
			kfree(qp);
			return ERR_PTR(err);
		}

		qp->ibqp.qp_num = qp->mqp.qpn;
		qp->xrcdn = xrcdn;

		break;
	}
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	{
		int sqpn;

		/* Userspace is not allowed to create special QPs: */
		if (udata)
			return ERR_PTR(-EINVAL);
		if (init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI) {
			int res = mlx4_qp_reserve_range(to_mdev(pd->device)->dev,
							1, 1, &sqpn, 0,
							MLX4_RES_USAGE_DRIVER);

			if (res)
				return ERR_PTR(res);
		} else {
			sqpn = get_sqp_num(to_mdev(pd->device), init_attr);
		}

		err = create_qp_common(to_mdev(pd->device), pd, MLX4_IB_QP_SRC,
				       init_attr, udata, sqpn, &qp);
		if (err)
			return ERR_PTR(err);

		qp->port	= init_attr->port_num;
		qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 :
			init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI ? sqpn : 1;
		break;
	}
	default:
		/* Don't support raw QPs */
		return ERR_PTR(-EINVAL);
	}

	return &qp->ibqp;
}
struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata) {
	struct ib_device *device = pd ? pd->device : init_attr->xrcd->device;
	struct ib_qp *ibqp;
	struct mlx4_ib_dev *dev = to_mdev(device);

	ibqp = _mlx4_ib_create_qp(pd, init_attr, udata);

	if (!IS_ERR(ibqp) &&
	    (init_attr->qp_type == IB_QPT_GSI) &&
	    !(init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI)) {
		struct mlx4_ib_sqp *sqp = to_msqp((to_mqp(ibqp)));
		int is_eth = rdma_cap_eth_ah(&dev->ib_dev, init_attr->port_num);

		if (is_eth &&
		    dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
			init_attr->create_flags |= MLX4_IB_QP_CREATE_ROCE_V2_GSI;
			sqp->roce_v2_gsi = ib_create_qp(pd, init_attr);

			if (IS_ERR(sqp->roce_v2_gsi)) {
				pr_err("Failed to create GSI QP for RoCEv2 (%ld)\n", PTR_ERR(sqp->roce_v2_gsi));
				sqp->roce_v2_gsi = NULL;
			} else {
				sqp = to_msqp(to_mqp(sqp->roce_v2_gsi));
				sqp->qp.flags |= MLX4_IB_ROCE_V2_GSI_QP;
			}

			init_attr->create_flags &= ~MLX4_IB_QP_CREATE_ROCE_V2_GSI;
		}
	}
	return ibqp;
}
static int _mlx4_ib_destroy_qp(struct ib_qp *qp)
{
	struct mlx4_ib_dev *dev = to_mdev(qp->device);
	struct mlx4_ib_qp *mqp = to_mqp(qp);

	if (is_qp0(dev, mqp))
		mlx4_CLOSE_PORT(dev->dev, mqp->port);

	if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI &&
	    dev->qp1_proxy[mqp->port - 1] == mqp) {
		mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]);
		dev->qp1_proxy[mqp->port - 1] = NULL;
		mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]);
	}

	if (mqp->counter_index)
		mlx4_ib_free_qp_counter(dev, mqp);

	if (qp->rwq_ind_tbl) {
		destroy_qp_rss(dev, mqp);
	} else {
		struct mlx4_ib_pd *pd;

		pd = get_pd(mqp);
		destroy_qp_common(dev, mqp, MLX4_IB_QP_SRC, !!pd->ibpd.uobject);
	}

	if (is_sqp(dev, mqp))
		kfree(to_msqp(mqp));
	else
		kfree(mqp);

	return 0;
}
int mlx4_ib_destroy_qp(struct ib_qp *qp)
{
	struct mlx4_ib_qp *mqp = to_mqp(qp);

	if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
		struct mlx4_ib_sqp *sqp = to_msqp(mqp);

		if (sqp->roce_v2_gsi)
			ib_destroy_qp(sqp->roce_v2_gsi);
	}

	return _mlx4_ib_destroy_qp(qp);
}
static int to_mlx4_st(struct mlx4_ib_dev *dev, enum mlx4_ib_qp_type type)
{
	switch (type) {
	case MLX4_IB_QPT_RC:		return MLX4_QP_ST_RC;
	case MLX4_IB_QPT_UC:		return MLX4_QP_ST_UC;
	case MLX4_IB_QPT_UD:		return MLX4_QP_ST_UD;
	case MLX4_IB_QPT_XRC_INI:
	case MLX4_IB_QPT_XRC_TGT:	return MLX4_QP_ST_XRC;
	case MLX4_IB_QPT_SMI:
	case MLX4_IB_QPT_GSI:
	case MLX4_IB_QPT_RAW_PACKET:	return MLX4_QP_ST_MLX;

	case MLX4_IB_QPT_PROXY_SMI_OWNER:
	case MLX4_IB_QPT_TUN_SMI_OWNER:	return (mlx4_is_mfunc(dev->dev) ?
						MLX4_QP_ST_MLX : -1);
	case MLX4_IB_QPT_PROXY_SMI:
	case MLX4_IB_QPT_TUN_SMI:
	case MLX4_IB_QPT_PROXY_GSI:
	case MLX4_IB_QPT_TUN_GSI:	return (mlx4_is_mfunc(dev->dev) ?
						MLX4_QP_ST_UD : -1);
	default:			return -1;
	}
}
static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr,
				   int attr_mask)
{
	u8 dest_rd_atomic;
	u32 access_flags;
	u32 hw_access_flags = 0;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MLX4_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= MLX4_QP_BIT_RAE;
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MLX4_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}
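/*
 * Example: qp_access_flags = IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE
 * maps to MLX4_QP_BIT_RRE | MLX4_QP_BIT_RWE; if the destination allows no
 * outstanding RDMA reads/atomics (dest_rd_atomic == 0), everything except
 * remote write is masked off first.
 */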
static void store_sqp_attrs(struct mlx4_ib_sqp *sqp, const struct ib_qp_attr *attr,
			    int attr_mask)
{
	if (attr_mask & IB_QP_PKEY_INDEX)
		sqp->pkey_index = attr->pkey_index;
	if (attr_mask & IB_QP_QKEY)
		sqp->qkey = attr->qkey;
	if (attr_mask & IB_QP_SQ_PSN)
		sqp->send_psn = attr->sq_psn;
}
static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
{
	path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6);
}
static int _mlx4_set_path(struct mlx4_ib_dev *dev,
			  const struct rdma_ah_attr *ah,
			  u64 smac, u16 vlan_tag, struct mlx4_qp_path *path,
			  struct mlx4_roce_smac_vlan_info *smac_info, u8 port)
{
	int vidx;
	int smac_index;
	int err;

	path->grh_mylmc = rdma_ah_get_path_bits(ah) & 0x7f;
	path->rlid = cpu_to_be16(rdma_ah_get_dlid(ah));
	if (rdma_ah_get_static_rate(ah)) {
		path->static_rate = rdma_ah_get_static_rate(ah) +
				    MLX4_STAT_RATE_OFFSET;
		while (path->static_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
		       !(1 << path->static_rate & dev->dev->caps.stat_rate_support))
			--path->static_rate;
	} else
		path->static_rate = 0;

	if (rdma_ah_get_ah_flags(ah) & IB_AH_GRH) {
		const struct ib_global_route *grh = rdma_ah_read_grh(ah);
		int real_sgid_index =
			mlx4_ib_gid_index_to_real_index(dev, grh->sgid_attr);

		if (real_sgid_index < 0)
			return real_sgid_index;
		if (real_sgid_index >= dev->dev->caps.gid_table_len[port]) {
			pr_err("sgid_index (%u) too large. max is %d\n",
			       real_sgid_index, dev->dev->caps.gid_table_len[port] - 1);
			return -1;
		}

		path->grh_mylmc |= 1 << 7;
		path->mgid_index = real_sgid_index;
		path->hop_limit  = grh->hop_limit;
		path->tclass_flowlabel =
			cpu_to_be32((grh->traffic_class << 20) |
				    (grh->flow_label));
		memcpy(path->rgid, grh->dgid.raw, 16);
	}

	if (ah->type == RDMA_AH_ATTR_TYPE_ROCE) {
		if (!(rdma_ah_get_ah_flags(ah) & IB_AH_GRH))
			return -1;

		path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
			((port - 1) << 6) | ((rdma_ah_get_sl(ah) & 7) << 3);

		path->feup |= MLX4_FEUP_FORCE_ETH_UP;
		if (vlan_tag < 0x1000) {
			if (smac_info->vid < 0x1000) {
				/* both valid vlan ids */
				if (smac_info->vid != vlan_tag) {
					/* different VIDs.  unreg old and reg new */
					err = mlx4_register_vlan(dev->dev, port, vlan_tag, &vidx);
					if (err)
						return err;
					smac_info->candidate_vid = vlan_tag;
					smac_info->candidate_vlan_index = vidx;
					smac_info->candidate_vlan_port = port;
					smac_info->update_vid = 1;
					path->vlan_index = vidx;
				} else {
					path->vlan_index = smac_info->vlan_index;
				}
			} else {
				/* no current vlan tag in qp */
				err = mlx4_register_vlan(dev->dev, port, vlan_tag, &vidx);
				if (err)
					return err;
				smac_info->candidate_vid = vlan_tag;
				smac_info->candidate_vlan_index = vidx;
				smac_info->candidate_vlan_port = port;
				smac_info->update_vid = 1;
				path->vlan_index = vidx;
			}
			path->feup |= MLX4_FVL_FORCE_ETH_VLAN;
		} else {
			/* have current vlan tag. unregister it at modify-qp success */
			if (smac_info->vid < 0x1000) {
				smac_info->candidate_vid = 0xFFFF;
				smac_info->update_vid = 1;
			}
		}

		/* get smac_index for RoCE use.
		 * If no smac was yet assigned, register one.
		 * If one was already assigned, but the new mac differs,
		 * unregister the old one and register the new one.
		*/
		if ((!smac_info->smac && !smac_info->smac_port) ||
		    smac_info->smac != smac) {
			/* register candidate now, unreg if needed, after success */
			smac_index = mlx4_register_mac(dev->dev, port, smac);
			if (smac_index >= 0) {
				smac_info->candidate_smac_index = smac_index;
				smac_info->candidate_smac = smac;
				smac_info->candidate_smac_port = port;
			} else {
				return -EINVAL;
			}
		} else {
			smac_index = smac_info->smac_index;
		}
		memcpy(path->dmac, ah->roce.dmac, 6);
		path->ackto = MLX4_IB_LINK_TYPE_ETH;
		/* put MAC table smac index for IBoE */
		path->grh_mylmc = (u8) (smac_index) | 0x80;
	} else {
		path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
			((port - 1) << 6) | ((rdma_ah_get_sl(ah) & 0xf) << 2);
	}

	return 0;
}
static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp,
			 enum ib_qp_attr_mask qp_attr_mask,
			 struct mlx4_ib_qp *mqp,
			 struct mlx4_qp_path *path, u8 port,
			 u16 vlan_id, u8 *smac)
{
	return _mlx4_set_path(dev, &qp->ah_attr,
			      mlx4_mac_to_u64(smac),
			      vlan_id,
			      path, &mqp->pri, port);
}
static int mlx4_set_alt_path(struct mlx4_ib_dev *dev,
			     const struct ib_qp_attr *qp,
			     enum ib_qp_attr_mask qp_attr_mask,
			     struct mlx4_ib_qp *mqp,
			     struct mlx4_qp_path *path, u8 port)
{
	return _mlx4_set_path(dev, &qp->alt_ah_attr,
			      0,
			      0xffff,
			      path, &mqp->alt, port);
}
static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	struct mlx4_ib_gid_entry *ge, *tmp;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		if (!ge->added && mlx4_ib_add_mc(dev, qp, &ge->gid)) {
			ge->added = 1;
			ge->port = qp->port;
		}
	}
}
static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev,
				    struct mlx4_ib_qp *qp,
				    struct mlx4_qp_context *context)
{
	u64 u64_mac;
	int smac_index;

	u64_mac = atomic64_read(&dev->iboe.mac[qp->port - 1]);

	context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6);
	if (!qp->pri.smac && !qp->pri.smac_port) {
		smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac);
		if (smac_index >= 0) {
			qp->pri.candidate_smac_index = smac_index;
			qp->pri.candidate_smac = u64_mac;
			qp->pri.candidate_smac_port = qp->port;
			context->pri_path.grh_mylmc = 0x80 | (u8) smac_index;
		} else {
			return -ENOENT;
		}
	}
	return 0;
}
static int create_qp_lb_counter(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	struct counter_index *new_counter_index;
	int err;
	u32 tmp_idx;

	if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) !=
	    IB_LINK_LAYER_ETHERNET ||
	    !(qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) ||
	    !(dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_LB_SRC_CHK))
		return 0;

	err = mlx4_counter_alloc(dev->dev, &tmp_idx, MLX4_RES_USAGE_DRIVER);
	if (err)
		return err;

	new_counter_index = kmalloc(sizeof(*new_counter_index), GFP_KERNEL);
	if (!new_counter_index) {
		mlx4_counter_free(dev->dev, tmp_idx);
		return -ENOMEM;
	}
	new_counter_index->index = tmp_idx;
	new_counter_index->allocated = 1;
	qp->counter_index = new_counter_index;

	mutex_lock(&dev->counters_table[qp->port - 1].mutex);
	list_add_tail(&new_counter_index->list,
		      &dev->counters_table[qp->port - 1].counters_list);
	mutex_unlock(&dev->counters_table[qp->port - 1].mutex);

	return 0;
}
enum {
	MLX4_QPC_ROCE_MODE_1 = 0,
	MLX4_QPC_ROCE_MODE_2 = 2,
	MLX4_QPC_ROCE_MODE_UNDEFINED = 0xff
};

static u8 gid_type_to_qpc(enum ib_gid_type gid_type)
{
	switch (gid_type) {
	case IB_GID_TYPE_ROCE:
		return MLX4_QPC_ROCE_MODE_1;
	case IB_GID_TYPE_ROCE_UDP_ENCAP:
		return MLX4_QPC_ROCE_MODE_2;
	default:
		return MLX4_QPC_ROCE_MODE_UNDEFINED;
	}
}
/*
 * Go over all RSS QP's children (WQs) and apply their HW state according to
 * their logical state if the RSS QP is the first RSS QP associated for the WQ.
 */
static int bringup_rss_rwqs(struct ib_rwq_ind_table *ind_tbl, u8 port_num)
{
	int err = 0;
	int i;

	for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
		struct ib_wq *ibwq = ind_tbl->ind_tbl[i];
		struct mlx4_ib_qp *wq = to_mqp((struct ib_qp *)ibwq);

		mutex_lock(&wq->mutex);

		/* Mlx4_ib restrictions:
		 * A WQ is associated to a port according to the RSS QP it is
		 * associated with.
		 * In case the WQ is associated to a different port by another
		 * RSS QP, return a failure.
		 */
		if ((wq->rss_usecnt > 0) && (wq->port != port_num)) {
			err = -EINVAL;
			mutex_unlock(&wq->mutex);
			break;
		}
		wq->port = port_num;
		if ((wq->rss_usecnt == 0) && (ibwq->state == IB_WQS_RDY)) {
			err = _mlx4_ib_modify_wq(ibwq, IB_WQS_RDY);
			if (err) {
				mutex_unlock(&wq->mutex);
				break;
			}
		}
		wq->rss_usecnt++;

		mutex_unlock(&wq->mutex);
	}

	if (i && err) {
		int j;

		for (j = (i - 1); j >= 0; j--) {
			struct ib_wq *ibwq = ind_tbl->ind_tbl[j];
			struct mlx4_ib_qp *wq = to_mqp((struct ib_qp *)ibwq);

			mutex_lock(&wq->mutex);

			if ((wq->rss_usecnt == 1) &&
			    (ibwq->state == IB_WQS_RDY))
				if (_mlx4_ib_modify_wq(ibwq, IB_WQS_RESET))
					pr_warn("failed to reverse WQN=0x%06x\n",
						ibwq->wq_num);
			wq->rss_usecnt--;

			mutex_unlock(&wq->mutex);
		}
	}

	return err;
}
static void bring_down_rss_rwqs(struct ib_rwq_ind_table *ind_tbl)
{
	int i;

	for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
		struct ib_wq *ibwq = ind_tbl->ind_tbl[i];
		struct mlx4_ib_qp *wq = to_mqp((struct ib_qp *)ibwq);

		mutex_lock(&wq->mutex);

		if ((wq->rss_usecnt == 1) && (ibwq->state == IB_WQS_RDY))
			if (_mlx4_ib_modify_wq(ibwq, IB_WQS_RESET))
				pr_warn("failed to reverse WQN=%x\n",
					ibwq->wq_num);
		wq->rss_usecnt--;

		mutex_unlock(&wq->mutex);
	}
}
static void fill_qp_rss_context(struct mlx4_qp_context *context,
				struct mlx4_ib_qp *qp)
{
	struct mlx4_rss_context *rss_context;

	rss_context = (void *)context + offsetof(struct mlx4_qp_context,
			pri_path) + MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;

	rss_context->base_qpn = cpu_to_be32(qp->rss_ctx->base_qpn_tbl_sz);
	rss_context->default_qpn =
		cpu_to_be32(qp->rss_ctx->base_qpn_tbl_sz & 0xffffff);
	if (qp->rss_ctx->flags & (MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6))
		rss_context->base_qpn_udp = rss_context->default_qpn;
	rss_context->flags = qp->rss_ctx->flags;
	/* Currently support just toeplitz */
	rss_context->hash_fn = MLX4_RSS_HASH_TOP;

	memcpy(rss_context->rss_key, qp->rss_ctx->rss_key,
	       MLX4_EN_RSS_KEY_SIZE);
}
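
/*
 * Note: rss_ctx->base_qpn_tbl_sz carries the base QPN in its low 24
 * bits (set up when the RSS QP was created), which is why
 * default_qpn above masks with 0xffffff to keep only the QPN part.
 */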
static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
			       const struct ib_qp_attr *attr, int attr_mask,
			       enum ib_qp_state cur_state,
			       enum ib_qp_state new_state)
{
	struct ib_uobject *ibuobject;
	struct ib_srq *ibsrq;
	const struct ib_gid_attr *gid_attr = NULL;
	struct ib_rwq_ind_table *rwq_ind_tbl;
	enum ib_qp_type qp_type;
	struct mlx4_ib_dev *dev;
	struct mlx4_ib_qp *qp;
	struct mlx4_ib_pd *pd;
	struct mlx4_ib_cq *send_cq, *recv_cq;
	struct mlx4_qp_context *context;
	enum mlx4_qp_optpar optpar = 0;
	int sqd_event;
	int steer_qp = 0;
	int err = -EINVAL;

	if (src_type == MLX4_IB_RWQ_SRC) {
		struct ib_wq *ibwq;

		ibwq	    = (struct ib_wq *)src;
		ibuobject   = ibwq->uobject;
		ibsrq	    = NULL;
		rwq_ind_tbl = NULL;
		qp_type	    = IB_QPT_RAW_PACKET;
		qp	    = to_mqp((struct ib_qp *)ibwq);
		dev	    = to_mdev(ibwq->device);
		pd	    = to_mpd(ibwq->pd);
	} else {
		struct ib_qp *ibqp;

		ibqp	    = (struct ib_qp *)src;
		ibuobject   = ibqp->uobject;
		ibsrq	    = ibqp->srq;
		rwq_ind_tbl = ibqp->rwq_ind_tbl;
		qp_type	    = ibqp->qp_type;
		qp	    = to_mqp(ibqp);
		dev	    = to_mdev(ibqp->device);
		pd	    = get_pd(qp);
	}

	/* APM is not supported under RoCE */
	if (attr_mask & IB_QP_ALT_PATH &&
	    rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
	    IB_LINK_LAYER_ETHERNET)
		return -ENOTSUPP;

	context = kzalloc(sizeof *context, GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) |
				     (to_mlx4_st(dev, qp->mlx4_ib_qp_type) << 16));

	if (!(attr_mask & IB_QP_PATH_MIG_STATE))
		context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
	else {
		optpar |= MLX4_QP_OPTPAR_PM_STATE;
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			context->flags |= cpu_to_be32(MLX4_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			context->flags |= cpu_to_be32(MLX4_QP_PM_ARMED << 11);
			break;
		}
	}

	if (qp->inl_recv_sz)
		context->param3 |= cpu_to_be32(1 << 25);

	if (qp->flags & MLX4_IB_QP_SCATTER_FCS)
		context->param3 |= cpu_to_be32(1 << 29);

	if (qp_type == IB_QPT_GSI || qp_type == IB_QPT_SMI)
		context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
	else if (qp_type == IB_QPT_RAW_PACKET)
		context->mtu_msgmax = (MLX4_RAW_QP_MTU << 5) | MLX4_RAW_QP_MSGMAX;
	else if (qp_type == IB_QPT_UD) {
		if (qp->flags & MLX4_IB_QP_LSO)
			context->mtu_msgmax = (IB_MTU_4096 << 5) |
					      ilog2(dev->dev->caps.max_gso_sz);
		else
			context->mtu_msgmax = (IB_MTU_4096 << 5) | 13;
	} else if (attr_mask & IB_QP_PATH_MTU) {
		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
			pr_err("path MTU (%u) is invalid\n",
			       attr->path_mtu);
			goto out;
		}
		context->mtu_msgmax = (attr->path_mtu << 5) |
			ilog2(dev->dev->caps.max_msg_sz);
	}

	if (!rwq_ind_tbl) { /* PRM RSS receive side should be left zeros */
		if (qp->rq.wqe_cnt)
			context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3;
		context->rq_size_stride |= qp->rq.wqe_shift - 4;
	}

	if (qp->sq.wqe_cnt)
		context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3;
	context->sq_size_stride |= qp->sq.wqe_shift - 4;

	if (new_state == IB_QPS_RESET && qp->counter_index)
		mlx4_ib_free_qp_counter(dev, qp);

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		context->sq_size_stride |= !!qp->sq_no_prefetch << 7;
		context->xrcd = cpu_to_be32((u32) qp->xrcdn);
		if (qp_type == IB_QPT_RAW_PACKET)
			context->param3 |= cpu_to_be32(1 << 30);
	}

	if (ibuobject)
		context->usr_page = cpu_to_be32(
			mlx4_to_hw_uar_index(dev->dev,
					     to_mucontext(ibuobject->context)
					     ->uar.index));
	else
		context->usr_page = cpu_to_be32(
			mlx4_to_hw_uar_index(dev->dev, dev->priv_uar.index));

	if (attr_mask & IB_QP_DEST_QPN)
		context->remote_qpn = cpu_to_be32(attr->dest_qp_num);

	if (attr_mask & IB_QP_PORT) {
		if (cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD &&
		    !(attr_mask & IB_QP_AV)) {
			mlx4_set_sched(&context->pri_path, attr->port_num);
			optpar |= MLX4_QP_OPTPAR_SCHED_QUEUE;
		}
	}

	if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
		int counter_index;

		err = create_qp_lb_counter(dev, qp);
		if (err)
			goto out;

		counter_index =
			dev->counters_table[qp->port - 1].default_counter;
		if (qp->counter_index)
			counter_index = qp->counter_index->index;

		if (counter_index != -1) {
			context->pri_path.counter_index = counter_index;
			optpar |= MLX4_QP_OPTPAR_COUNTER_INDEX;
			if (qp->counter_index) {
				context->pri_path.fl |=
					MLX4_FL_ETH_SRC_CHECK_MC_LB;
				context->pri_path.vlan_control |=
					MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
			}
		} else
			context->pri_path.counter_index =
				MLX4_SINK_COUNTER_INDEX(dev->dev);

		if (qp->flags & MLX4_IB_QP_NETIF) {
			mlx4_ib_steer_qp_reg(dev, qp, 1);
			steer_qp = 1;
		}

		if (qp_type == IB_QPT_GSI) {
			enum ib_gid_type gid_type = qp->flags & MLX4_IB_ROCE_V2_GSI_QP ?
				IB_GID_TYPE_ROCE_UDP_ENCAP : IB_GID_TYPE_ROCE;
			u8 qpc_roce_mode = gid_type_to_qpc(gid_type);

			context->rlkey_roce_mode |= (qpc_roce_mode << 6);
		}
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV)
			context->pri_path.disable_pkey_check = 0x40;
		context->pri_path.pkey_index = attr->pkey_index;
		optpar |= MLX4_QP_OPTPAR_PKEY_INDEX;
	}

	if (attr_mask & IB_QP_AV) {
		u8 port_num = mlx4_is_bonded(dev->dev) ? 1 :
			attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
		u16 vlan = 0xffff;
		u8 smac[ETH_ALEN];
		int is_eth =
			rdma_cap_eth_ah(&dev->ib_dev, port_num) &&
			rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH;

		if (is_eth) {
			gid_attr = attr->ah_attr.grh.sgid_attr;
			vlan = rdma_vlan_dev_vlan_id(gid_attr->ndev);
			memcpy(smac, gid_attr->ndev->dev_addr, ETH_ALEN);
		}

		if (mlx4_set_path(dev, attr, attr_mask, qp, &context->pri_path,
				  port_num, vlan, smac))
			goto out;

		optpar |= (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH |
			   MLX4_QP_OPTPAR_SCHED_QUEUE);

		if (is_eth &&
		    (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR)) {
			u8 qpc_roce_mode = gid_type_to_qpc(gid_attr->gid_type);

			if (qpc_roce_mode == MLX4_QPC_ROCE_MODE_UNDEFINED) {
				err = -EINVAL;
				goto out;
			}
			context->rlkey_roce_mode |= (qpc_roce_mode << 6);
		}
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		context->pri_path.ackto |= attr->timeout << 3;
		optpar |= MLX4_QP_OPTPAR_ACK_TIMEOUT;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_port_num == 0 ||
		    attr->alt_port_num > dev->dev->caps.num_ports)
			goto out;

		if (attr->alt_pkey_index >=
		    dev->dev->caps.pkey_table_len[attr->alt_port_num])
			goto out;

		if (mlx4_set_alt_path(dev, attr, attr_mask, qp,
				      &context->alt_path,
				      attr->alt_port_num))
			goto out;

		context->alt_path.pkey_index = attr->alt_pkey_index;
		context->alt_path.ackto = attr->alt_timeout << 3;
		optpar |= MLX4_QP_OPTPAR_ALT_ADDR_PATH;
	}

	context->pd = cpu_to_be32(pd->pdn);

	if (!rwq_ind_tbl) {
		context->params1 = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28);
		get_cqs(qp, src_type, &send_cq, &recv_cq);
	} else { /* Set dummy CQs to be compatible with HV and PRM */
		send_cq = to_mcq(rwq_ind_tbl->ind_tbl[0]->cq);
		recv_cq = send_cq;
	}
	context->cqn_send = cpu_to_be32(send_cq->mcq.cqn);
	context->cqn_recv = cpu_to_be32(recv_cq->mcq.cqn);

	/* Set "fast registration enabled" for all kernel QPs */
	if (!ibuobject)
		context->params1 |= cpu_to_be32(1 << 11);

	if (attr_mask & IB_QP_RNR_RETRY) {
		context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
		optpar |= MLX4_QP_OPTPAR_RNR_RETRY;
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
		optpar |= MLX4_QP_OPTPAR_RETRY_COUNT;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic)
			context->params1 |=
				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
		optpar |= MLX4_QP_OPTPAR_SRA_MAX;
	}

	if (attr_mask & IB_QP_SQ_PSN)
		context->next_send_psn = cpu_to_be32(attr->sq_psn);

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic)
			context->params2 |=
				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
		optpar |= MLX4_QP_OPTPAR_RRA_MAX;
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
		context->params2 |= to_mlx4_access_flags(qp, attr, attr_mask);
		optpar |= MLX4_QP_OPTPAR_RWE | MLX4_QP_OPTPAR_RRE | MLX4_QP_OPTPAR_RAE;
	}

	if (ibsrq)
		context->params2 |= cpu_to_be32(MLX4_QP_BIT_RIC);

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
		optpar |= MLX4_QP_OPTPAR_RNR_TIMEOUT;
	}
	if (attr_mask & IB_QP_RQ_PSN)
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	/* proxy and tunnel qp qkeys will be changed in modify-qp wrappers */
	if (attr_mask & IB_QP_QKEY) {
		if (qp->mlx4_ib_qp_type &
		    (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER))
			context->qkey = cpu_to_be32(IB_QP_SET_QKEY);
		else {
			if (mlx4_is_mfunc(dev->dev) &&
			    !(qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) &&
			    (attr->qkey & MLX4_RESERVED_QKEY_MASK) ==
			    MLX4_RESERVED_QKEY_BASE) {
				pr_err("Cannot use reserved QKEY"
				       " 0x%x (range 0xffff0000..0xffffffff"
				       " is reserved)\n", attr->qkey);
				err = -EINVAL;
				goto out;
			}
			context->qkey = cpu_to_be32(attr->qkey);
		}
		optpar |= MLX4_QP_OPTPAR_Q_KEY;
	}

	if (ibsrq)
		context->srqn = cpu_to_be32(1 << 24 |
					    to_msrq(ibsrq)->msrq.srqn);

	if (qp->rq.wqe_cnt &&
	    cur_state == IB_QPS_RESET &&
	    new_state == IB_QPS_INIT)
		context->db_rec_addr = cpu_to_be64(qp->db.dma);

	if (cur_state == IB_QPS_INIT &&
	    new_state == IB_QPS_RTR &&
	    (qp_type == IB_QPT_GSI || qp_type == IB_QPT_SMI ||
	     qp_type == IB_QPT_UD || qp_type == IB_QPT_RAW_PACKET)) {
		context->pri_path.sched_queue = (qp->port - 1) << 6;
		if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI ||
		    qp->mlx4_ib_qp_type &
		    (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) {
			context->pri_path.sched_queue |= MLX4_IB_DEFAULT_QP0_SCHED_QUEUE;
			if (qp->mlx4_ib_qp_type != MLX4_IB_QPT_SMI)
				context->pri_path.fl = 0x80;
		} else {
			if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV)
				context->pri_path.fl = 0x80;
			context->pri_path.sched_queue |= MLX4_IB_DEFAULT_SCHED_QUEUE;
		}
		if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
		    IB_LINK_LAYER_ETHERNET) {
			if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI ||
			    qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI)
				context->pri_path.feup = 1 << 7; /* don't fsm */
			/* handle smac_index */
			if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_UD ||
			    qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI ||
			    qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) {
				err = handle_eth_ud_smac_index(dev, qp, context);
				if (err) {
					err = -EINVAL;
					goto out;
				}
				if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
					dev->qp1_proxy[qp->port - 1] = qp;
			}
		}
	}

	if (qp_type == IB_QPT_RAW_PACKET) {
		context->pri_path.ackto = (context->pri_path.ackto & 0xf8) |
					MLX4_IB_LINK_TYPE_ETH;
		if (dev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
			/* set QP to receive both tunneled & non-tunneled packets */
			if (!rwq_ind_tbl)
				context->srqn = cpu_to_be32(7 << 28);
		}
	}

	if (qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) {
		int is_eth = rdma_port_get_link_layer(
				&dev->ib_dev, qp->port) ==
				IB_LINK_LAYER_ETHERNET;
		if (is_eth) {
			context->pri_path.ackto = MLX4_IB_LINK_TYPE_ETH;
			optpar |= MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH;
		}
	}

	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
		sqd_event = 1;
	else
		sqd_event = 0;

	if (!ibuobject &&
	    cur_state == IB_QPS_RESET &&
	    new_state == IB_QPS_INIT)
		context->rlkey_roce_mode |= (1 << 4);

	/*
	 * Before passing a kernel QP to the HW, make sure that the
	 * ownership bits of the send queue are set and the SQ
	 * headroom is stamped so that the hardware doesn't start
	 * processing stale work requests.
	 */
	if (!ibuobject &&
	    cur_state == IB_QPS_RESET &&
	    new_state == IB_QPS_INIT) {
		struct mlx4_wqe_ctrl_seg *ctrl;
		int i;

		for (i = 0; i < qp->sq.wqe_cnt; ++i) {
			ctrl = get_send_wqe(qp, i);
			ctrl->owner_opcode = cpu_to_be32(1 << 31);
			if (qp->sq_max_wqes_per_wr == 1)
				ctrl->qpn_vlan.fence_size =
						1 << (qp->sq.wqe_shift - 4);

			stamp_send_wqe(qp, i, 1 << qp->sq.wqe_shift);
		}
	}

	if (rwq_ind_tbl &&
	    cur_state == IB_QPS_RESET &&
	    new_state == IB_QPS_INIT) {
		fill_qp_rss_context(context, qp);
		context->flags |= cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET);
	}

	err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state),
			     to_mlx4_state(new_state), context, optpar,
			     sqd_event, &qp->mqp);
	if (err)
		goto out;

	qp->state = new_state;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->atomic_rd_en = attr->qp_access_flags;
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT) {
		qp->port = attr->port_num;
		update_mcg_macs(dev, qp);
	}
	if (attr_mask & IB_QP_ALT_PATH)
		qp->alt_port = attr->alt_port_num;

	if (is_sqp(dev, qp))
		store_sqp_attrs(to_msqp(qp), attr, attr_mask);

	/*
	 * If we moved QP0 to RTR, bring the IB link up; if we moved
	 * QP0 to RESET or ERROR, bring the link back down.
	 */
	if (is_qp0(dev, qp)) {
		if (cur_state != IB_QPS_RTR && new_state == IB_QPS_RTR)
			if (mlx4_INIT_PORT(dev->dev, qp->port))
				pr_warn("INIT_PORT failed for port %d\n",
					qp->port);

		if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
		    (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR))
			mlx4_CLOSE_PORT(dev->dev, qp->port);
	}

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (new_state == IB_QPS_RESET) {
		if (!ibuobject) {
			mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
					 ibsrq ? to_msrq(ibsrq) : NULL);
			if (send_cq != recv_cq)
				mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);

			qp->rq.head = 0;
			qp->rq.tail = 0;
			qp->sq.head = 0;
			qp->sq.tail = 0;
			qp->sq_next_wqe = 0;

			if (qp->rq.wqe_cnt)
				*qp->db.db = 0;

			if (qp->flags & MLX4_IB_QP_NETIF)
				mlx4_ib_steer_qp_reg(dev, qp, 0);
		}
		if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) {
			mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
			qp->pri.smac = 0;
			qp->pri.smac_port = 0;
		}
		if (qp->alt.smac) {
			mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
			qp->alt.smac = 0;
		}
		if (qp->pri.vid < 0x1000) {
			mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid);
			qp->pri.vid = 0xFFFF;
			qp->pri.candidate_vid = 0xFFFF;
			qp->pri.update_vid = 0;
		}

		if (qp->alt.vid < 0x1000) {
			mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid);
			qp->alt.vid = 0xFFFF;
			qp->alt.candidate_vid = 0xFFFF;
			qp->alt.update_vid = 0;
		}
	}
out:
	if (err && qp->counter_index)
		mlx4_ib_free_qp_counter(dev, qp);
	if (err && steer_qp)
		mlx4_ib_steer_qp_reg(dev, qp, 0);
	kfree(context);

	if (qp->pri.candidate_smac ||
	    (!qp->pri.candidate_smac && qp->pri.candidate_smac_port)) {
		if (err) {
			mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac);
		} else {
			if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port))
				mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
			qp->pri.smac = qp->pri.candidate_smac;
			qp->pri.smac_index = qp->pri.candidate_smac_index;
			qp->pri.smac_port = qp->pri.candidate_smac_port;
		}
		qp->pri.candidate_smac = 0;
		qp->pri.candidate_smac_index = 0;
		qp->pri.candidate_smac_port = 0;
	}
	if (qp->alt.candidate_smac) {
		if (err) {
			mlx4_unregister_mac(dev->dev, qp->alt.candidate_smac_port, qp->alt.candidate_smac);
		} else {
			if (qp->alt.smac)
				mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
			qp->alt.smac = qp->alt.candidate_smac;
			qp->alt.smac_index = qp->alt.candidate_smac_index;
			qp->alt.smac_port = qp->alt.candidate_smac_port;
		}
		qp->alt.candidate_smac = 0;
		qp->alt.candidate_smac_index = 0;
		qp->alt.candidate_smac_port = 0;
	}

	if (qp->pri.update_vid) {
		if (err) {
			if (qp->pri.candidate_vid < 0x1000)
				mlx4_unregister_vlan(dev->dev, qp->pri.candidate_vlan_port,
						     qp->pri.candidate_vid);
		} else {
			if (qp->pri.vid < 0x1000)
				mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port,
						     qp->pri.vid);
			qp->pri.vid = qp->pri.candidate_vid;
			qp->pri.vlan_port = qp->pri.candidate_vlan_port;
			qp->pri.vlan_index = qp->pri.candidate_vlan_index;
		}
		qp->pri.candidate_vid = 0xFFFF;
		qp->pri.update_vid = 0;
	}

	if (qp->alt.update_vid) {
		if (err) {
			if (qp->alt.candidate_vid < 0x1000)
				mlx4_unregister_vlan(dev->dev, qp->alt.candidate_vlan_port,
						     qp->alt.candidate_vid);
		} else {
			if (qp->alt.vid < 0x1000)
				mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port,
						     qp->alt.vid);
			qp->alt.vid = qp->alt.candidate_vid;
			qp->alt.vlan_port = qp->alt.candidate_vlan_port;
			qp->alt.vlan_index = qp->alt.candidate_vlan_index;
		}
		qp->alt.candidate_vid = 0xFFFF;
		qp->alt.update_vid = 0;
	}

	return err;
}
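
/*
 * The candidate_smac/candidate_vid handling above implements a simple
 * commit-or-rollback protocol: MACs and VLANs registered while
 * building the new context only replace the live ones once
 * mlx4_qp_modify() succeeds; on failure the candidates are
 * unregistered and the previous values stay in place.
 */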
enum {
	MLX4_IB_MODIFY_QP_RSS_SUP_ATTR_MSK = (IB_QP_STATE	|
					      IB_QP_PORT),
};
static int _mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			      int attr_mask, struct ib_udata *udata)
{
	enum rdma_link_layer ll = IB_LINK_LAYER_UNSPECIFIED;
	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	int err = -EINVAL;

	mutex_lock(&qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (cur_state != new_state || cur_state != IB_QPS_RESET) {
		int port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
		ll = rdma_port_get_link_layer(&dev->ib_dev, port);
	}

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask, ll)) {
		pr_debug("qpn 0x%x: invalid attribute mask specified "
			 "for transition %d to %d. qp_type %d,"
			 " attr_mask 0x%x\n",
			 ibqp->qp_num, cur_state, new_state,
			 ibqp->qp_type, attr_mask);
		goto out;
	}

	if (ibqp->rwq_ind_tbl) {
		if (!(((cur_state == IB_QPS_RESET) &&
		       (new_state == IB_QPS_INIT)) ||
		      ((cur_state == IB_QPS_INIT) &&
		       (new_state == IB_QPS_RTR)))) {
			pr_debug("qpn 0x%x: RSS QP unsupported transition %d to %d\n",
				 ibqp->qp_num, cur_state, new_state);

			err = -EOPNOTSUPP;
			goto out;
		}

		if (attr_mask & ~MLX4_IB_MODIFY_QP_RSS_SUP_ATTR_MSK) {
			pr_debug("qpn 0x%x: RSS QP unsupported attribute mask 0x%x for transition %d to %d\n",
				 ibqp->qp_num, attr_mask, cur_state, new_state);

			err = -EOPNOTSUPP;
			goto out;
		}
	}

	if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT)) {
		if ((cur_state == IB_QPS_RESET) && (new_state == IB_QPS_INIT)) {
			if ((ibqp->qp_type == IB_QPT_RC) ||
			    (ibqp->qp_type == IB_QPT_UD) ||
			    (ibqp->qp_type == IB_QPT_UC) ||
			    (ibqp->qp_type == IB_QPT_RAW_PACKET) ||
			    (ibqp->qp_type == IB_QPT_XRC_INI)) {
				attr->port_num = mlx4_ib_bond_next_port(dev);
			}
		} else {
			/* no sense in changing port_num
			 * when ports are bonded */
			attr_mask &= ~IB_QP_PORT;
		}
	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > dev->num_ports)) {
		pr_debug("qpn 0x%x: invalid port number (%d) specified "
			 "for transition %d to %d. qp_type %d\n",
			 ibqp->qp_num, attr->port_num, cur_state,
			 new_state, ibqp->qp_type);
		goto out;
	}

	if ((attr_mask & IB_QP_PORT) && (ibqp->qp_type == IB_QPT_RAW_PACKET) &&
	    (rdma_port_get_link_layer(&dev->ib_dev, attr->port_num) !=
	     IB_LINK_LAYER_ETHERNET))
		goto out;

	if (attr_mask & IB_QP_PKEY_INDEX) {
		int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
		if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p]) {
			pr_debug("qpn 0x%x: invalid pkey index (%d) specified "
				 "for transition %d to %d. qp_type %d\n",
				 ibqp->qp_num, attr->pkey_index, cur_state,
				 new_state, ibqp->qp_type);
			goto out;
		}
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > dev->dev->caps.max_qp_init_rdma) {
		pr_debug("qpn 0x%x: max_rd_atomic (%d) too large. "
			 "Transition %d to %d. qp_type %d\n",
			 ibqp->qp_num, attr->max_rd_atomic, cur_state,
			 new_state, ibqp->qp_type);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > dev->dev->caps.max_qp_dest_rdma) {
		pr_debug("qpn 0x%x: max_dest_rd_atomic (%d) too large. "
			 "Transition %d to %d. qp_type %d\n",
			 ibqp->qp_num, attr->max_dest_rd_atomic, cur_state,
			 new_state, ibqp->qp_type);
		goto out;
	}

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		err = 0;
		goto out;
	}

	if (ibqp->rwq_ind_tbl && (new_state == IB_QPS_INIT)) {
		err = bringup_rss_rwqs(ibqp->rwq_ind_tbl, attr->port_num);
		if (err)
			goto out;
	}

	err = __mlx4_ib_modify_qp(ibqp, MLX4_IB_QP_SRC, attr, attr_mask,
				  cur_state, new_state);

	if (ibqp->rwq_ind_tbl && err)
		bring_down_rss_rwqs(ibqp->rwq_ind_tbl);

	if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT))
		attr->port_num = 1;

out:
	mutex_unlock(&qp->mutex);
	return err;
}
int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	int ret;

	ret = _mlx4_ib_modify_qp(ibqp, attr, attr_mask, udata);

	if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
		struct mlx4_ib_sqp *sqp = to_msqp(mqp);
		int err = 0;

		if (sqp->roce_v2_gsi)
			err = ib_modify_qp(sqp->roce_v2_gsi, attr, attr_mask);
		if (err)
			pr_err("Failed to modify GSI QP for RoCEv2 (%d)\n",
			       err);
	}
	return ret;
}
static int vf_get_qp0_qkey(struct mlx4_dev *dev, int qpn, u32 *qkey)
{
	int i;

	for (i = 0; i < dev->caps.num_ports; i++) {
		if (qpn == dev->caps.spec_qps[i].qp0_proxy ||
		    qpn == dev->caps.spec_qps[i].qp0_tunnel) {
			*qkey = dev->caps.spec_qps[i].qp0_qkey;
			return 0;
		}
	}
	return -EINVAL;
}
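
/*
 * On a VF the paravirtualized QP0 qkey is distributed by the PF and
 * cached in the per-port spec_qps table, so it is looked up there
 * rather than derived with mlx4_get_parav_qkey() as on the master.
 */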
static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
				  struct ib_ud_wr *wr,
				  void *wqe, unsigned *mlx_seg_len)
{
	struct mlx4_ib_dev *mdev = to_mdev(sqp->qp.ibqp.device);
	struct ib_device *ib_dev = &mdev->ib_dev;
	struct mlx4_wqe_mlx_seg *mlx = wqe;
	struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
	struct mlx4_ib_ah *ah = to_mah(wr->ah);
	u16 pkey;
	u32 qkey;
	int send_size;
	int header_size;
	int spc;
	int i;

	if (wr->wr.opcode != IB_WR_SEND)
		return -EINVAL;

	send_size = 0;

	for (i = 0; i < wr->wr.num_sge; ++i)
		send_size += wr->wr.sg_list[i].length;

	/* for proxy-qp0 sends, need to add in size of tunnel header */
	/* for tunnel-qp0 sends, tunnel header is already in s/g list */
	if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER)
		send_size += sizeof (struct mlx4_ib_tunnel_header);

	ib_ud_header_init(send_size, 1, 0, 0, 0, 0, 0, 0, &sqp->ud_header);

	if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) {
		sqp->ud_header.lrh.service_level =
			be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
		sqp->ud_header.lrh.destination_lid =
			cpu_to_be16(ah->av.ib.g_slid & 0x7f);
		sqp->ud_header.lrh.source_lid =
			cpu_to_be16(ah->av.ib.g_slid & 0x7f);
	}

	mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);

	/* force loopback */
	mlx->flags |= cpu_to_be32(MLX4_WQE_MLX_VL15 | 0x1 | MLX4_WQE_MLX_SLR);
	mlx->rlid = sqp->ud_header.lrh.destination_lid;

	sqp->ud_header.lrh.virtual_lane    = 0;
	sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
	ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
	if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER)
		sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
	else
		sqp->ud_header.bth.destination_qpn =
			cpu_to_be32(mdev->dev->caps.spec_qps[sqp->qp.port - 1].qp0_tunnel);

	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	if (mlx4_is_master(mdev->dev)) {
		if (mlx4_get_parav_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey))
			return -EINVAL;
	} else {
		if (vf_get_qp0_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey))
			return -EINVAL;
	}
	sqp->ud_header.deth.qkey = cpu_to_be32(qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.mqp.qpn);

	sqp->ud_header.bth.opcode        = IB_OPCODE_UD_SEND_ONLY;
	sqp->ud_header.immediate_present = 0;

	header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);

	/*
	 * Inline data segments may not cross a 64 byte boundary.  If
	 * our UD header is bigger than the space available up to the
	 * next 64 byte boundary in the WQE, use two inline data
	 * segments to hold the UD header.
	 */
	spc = MLX4_INLINE_ALIGN -
	      ((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
	if (header_size <= spc) {
		inl->byte_count = cpu_to_be32(1 << 31 | header_size);
		memcpy(inl + 1, sqp->header_buf, header_size);
		i = 1;
	} else {
		inl->byte_count = cpu_to_be32(1 << 31 | spc);
		memcpy(inl + 1, sqp->header_buf, spc);

		inl = (void *) (inl + 1) + spc;
		memcpy(inl + 1, sqp->header_buf + spc, header_size - spc);
		/*
		 * Need a barrier here to make sure all the data is
		 * visible before the byte_count field is set.
		 * Otherwise the HCA prefetcher could grab the 64-byte
		 * chunk with this inline segment and get a valid (!=
		 * 0xffffffff) byte count but stale data, and end up
		 * generating a packet with bad headers.
		 *
		 * The first inline segment's byte_count field doesn't
		 * need a barrier, because it comes after a
		 * control/MLX segment and therefore is at an offset
		 * of 16 mod 64.
		 */
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (header_size - spc));
		i = 2;
	}

	*mlx_seg_len =
	ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
	return 0;
}
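
/*
 * Worked example of the inline split above: the MLX segment is 16
 * bytes, so the inline segment header lands at byte 16 of the
 * 64-byte-aligned WQE and its payload at byte 20, giving
 * spc = 64 - 20 = 44.  An 82-byte UD header is then emitted as a
 * 44-byte fragment followed by a 38-byte fragment, so neither
 * inline segment crosses a 64-byte boundary.
 */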
static u8 sl_to_vl(struct mlx4_ib_dev *dev, u8 sl, int port_num)
{
	union sl2vl_tbl_to_u64 tmp_vltab;
	u8 vl;

	if (sl > 15)
		return 0xf;
	tmp_vltab.sl64 = atomic64_read(&dev->sl2vl[port_num - 1]);
	vl = tmp_vltab.sl8[sl >> 1];
	if (sl & 1)
		vl &= 0x0f;
	else
		vl >>= 4;
	return vl;
}
static int fill_gid_by_hw_index(struct mlx4_ib_dev *ibdev, u8 port_num,
				int index, union ib_gid *gid,
				enum ib_gid_type *gid_type)
{
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct mlx4_port_gid_table *port_gid_table;
	unsigned long flags;

	port_gid_table = &iboe->gids[port_num - 1];
	spin_lock_irqsave(&iboe->lock, flags);
	memcpy(gid, &port_gid_table->gids[index].gid, sizeof(*gid));
	*gid_type = port_gid_table->gids[index].gid_type;
	spin_unlock_irqrestore(&iboe->lock, flags);
	if (rdma_is_zero_gid(gid))
		return -ENOENT;

	return 0;
}
#define MLX4_ROCEV2_QP1_SPORT 0xC000
static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr,
			    void *wqe, unsigned *mlx_seg_len)
{
	struct ib_device *ib_dev = sqp->qp.ibqp.device;
	struct mlx4_ib_dev *ibdev = to_mdev(ib_dev);
	struct mlx4_wqe_mlx_seg *mlx = wqe;
	struct mlx4_wqe_ctrl_seg *ctrl = wqe;
	struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
	struct mlx4_ib_ah *ah = to_mah(wr->ah);
	union ib_gid sgid;
	u16 pkey;
	int send_size;
	int header_size;
	int spc;
	int i;
	int err = 0;
	u16 vlan = 0xffff;
	bool is_eth;
	bool is_vlan = false;
	bool is_grh;
	bool is_udp = false;
	u8 ip_version = 0;

	send_size = 0;
	for (i = 0; i < wr->wr.num_sge; ++i)
		send_size += wr->wr.sg_list[i].length;

	is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET;
	is_grh = mlx4_ib_ah_grh_present(ah);
	if (is_eth) {
		enum ib_gid_type gid_type;

		if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
			/* When multi-function is enabled, the ib_core gid
			 * indexes don't necessarily match the hw ones, so
			 * we must use our own cache */
			err = mlx4_get_roce_gid_from_slave(to_mdev(ib_dev)->dev,
							   be32_to_cpu(ah->av.ib.port_pd) >> 24,
							   ah->av.ib.gid_index, &sgid.raw[0]);
			if (err)
				return err;
		} else {
			err = fill_gid_by_hw_index(ibdev, sqp->qp.port,
						   ah->av.ib.gid_index,
						   &sgid, &gid_type);
			if (!err) {
				is_udp = gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
				if (is_udp) {
					if (ipv6_addr_v4mapped((struct in6_addr *)&sgid))
						ip_version = 4;
					else
						ip_version = 6;
					is_grh = false;
				}
			} else {
				return err;
			}
		}

		if (ah->av.eth.vlan != cpu_to_be16(0xffff)) {
			vlan = be16_to_cpu(ah->av.eth.vlan) & 0x0fff;
			is_vlan = 1;
		}
	}
	err = ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh,
				ip_version, is_udp, 0, &sqp->ud_header);
	if (err)
		return err;

	if (!is_eth) {
		sqp->ud_header.lrh.service_level =
			be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
		sqp->ud_header.lrh.destination_lid = ah->av.ib.dlid;
		sqp->ud_header.lrh.source_lid = cpu_to_be16(ah->av.ib.g_slid & 0x7f);
	}

	if (is_grh || (ip_version == 6)) {
		sqp->ud_header.grh.traffic_class =
			(be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20) & 0xff;
		sqp->ud_header.grh.flow_label =
			ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
		sqp->ud_header.grh.hop_limit = ah->av.ib.hop_limit;
		if (is_eth) {
			memcpy(sqp->ud_header.grh.source_gid.raw, sgid.raw, 16);
		} else {
			if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
				/* When multi-function is enabled, the ib_core gid
				 * indexes don't necessarily match the hw ones, so
				 * we must use our own cache
				 */
				sqp->ud_header.grh.source_gid.global.subnet_prefix =
					cpu_to_be64(atomic64_read(&(to_mdev(ib_dev)->sriov.
								    demux[sqp->qp.port - 1].
								    subnet_prefix)));
				sqp->ud_header.grh.source_gid.global.interface_id =
					to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
						guid_cache[ah->av.ib.gid_index];
			} else {
				sqp->ud_header.grh.source_gid =
					ah->ibah.sgid_attr->gid;
			}
		}
		memcpy(sqp->ud_header.grh.destination_gid.raw,
		       ah->av.ib.dgid, 16);
	}

	if (ip_version == 4) {
		sqp->ud_header.ip4.tos =
			(be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20) & 0xff;
		sqp->ud_header.ip4.id = 0;
		sqp->ud_header.ip4.frag_off = htons(IP_DF);
		sqp->ud_header.ip4.ttl = ah->av.eth.hop_limit;

		memcpy(&sqp->ud_header.ip4.saddr,
		       sgid.raw + 12, 4);
		memcpy(&sqp->ud_header.ip4.daddr, ah->av.ib.dgid + 12, 4);
		sqp->ud_header.ip4.check = ib_ud_ip4_csum(&sqp->ud_header);
	}

	if (is_udp) {
		sqp->ud_header.udp.dport = htons(ROCE_V2_UDP_DPORT);
		sqp->ud_header.udp.sport = htons(MLX4_ROCEV2_QP1_SPORT);
		sqp->ud_header.udp.csum = 0;
	}

	mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);

	if (!is_eth) {
		mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) |
					  (sqp->ud_header.lrh.destination_lid ==
					   IB_LID_PERMISSIVE ? MLX4_WQE_MLX_SLR : 0) |
					  (sqp->ud_header.lrh.service_level << 8));
		if (ah->av.ib.port_pd & cpu_to_be32(0x80000000))
			mlx->flags |= cpu_to_be32(0x1); /* force loopback */
		mlx->rlid = sqp->ud_header.lrh.destination_lid;
	}

	switch (wr->wr.opcode) {
	case IB_WR_SEND:
		sqp->ud_header.bth.opcode	 = IB_OPCODE_UD_SEND_ONLY;
		sqp->ud_header.immediate_present = 0;
		break;
	case IB_WR_SEND_WITH_IMM:
		sqp->ud_header.bth.opcode	 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		sqp->ud_header.immediate_present = 1;
		sqp->ud_header.immediate_data	 = wr->wr.ex.imm_data;
		break;
	default:
		return -EINVAL;
	}

	if (is_eth) {
		struct in6_addr in6;
		u16 ether_type;
		u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;

		ether_type = (!is_udp) ? ETH_P_IBOE :
			(ip_version == 4 ? ETH_P_IP : ETH_P_IPV6);

		mlx->sched_prio = cpu_to_be16(pcp);

		ether_addr_copy(sqp->ud_header.eth.smac_h, ah->av.eth.s_mac);
		memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6);
		memcpy(&ctrl->srcrb_flags16[0], ah->av.eth.mac, 2);
		memcpy(&ctrl->imm, ah->av.eth.mac + 2, 4);
		memcpy(&in6, sgid.raw, sizeof(in6));

		if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
			mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
		if (!is_vlan) {
			sqp->ud_header.eth.type = cpu_to_be16(ether_type);
		} else {
			sqp->ud_header.vlan.type = cpu_to_be16(ether_type);
			sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp);
		}
	} else {
		sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 :
			sl_to_vl(to_mdev(ib_dev),
				 sqp->ud_header.lrh.service_level,
				 sqp->qp.port);
		if (sqp->qp.ibqp.qp_num && sqp->ud_header.lrh.virtual_lane == 15)
			return -EINVAL;
		if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
			sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
	}
	sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
	if (!sqp->qp.ibqp.qp_num)
		ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey);
	else
		ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index, &pkey);
	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ?
					       sqp->qkey : wr->remote_qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);

	header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);

	if (0) {
		pr_err("built UD header of size %d:\n", header_size);
		for (i = 0; i < header_size / 4; ++i) {
			if (i % 8 == 0)
				pr_err(" [%02x] ", i * 4);
			pr_cont(" %08x",
				be32_to_cpu(((__be32 *) sqp->header_buf)[i]));
			if ((i + 1) % 8 == 0)
				pr_cont("\n");
		}
		pr_err("\n");
	}

	/*
	 * Inline data segments may not cross a 64 byte boundary.  If
	 * our UD header is bigger than the space available up to the
	 * next 64 byte boundary in the WQE, use two inline data
	 * segments to hold the UD header.
	 */
	spc = MLX4_INLINE_ALIGN -
		((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
	if (header_size <= spc) {
		inl->byte_count = cpu_to_be32(1 << 31 | header_size);
		memcpy(inl + 1, sqp->header_buf, header_size);
		i = 1;
	} else {
		inl->byte_count = cpu_to_be32(1 << 31 | spc);
		memcpy(inl + 1, sqp->header_buf, spc);

		inl = (void *) (inl + 1) + spc;
		memcpy(inl + 1, sqp->header_buf + spc, header_size - spc);
		/*
		 * Need a barrier here to make sure all the data is
		 * visible before the byte_count field is set.
		 * Otherwise the HCA prefetcher could grab the 64-byte
		 * chunk with this inline segment and get a valid (!=
		 * 0xffffffff) byte count but stale data, and end up
		 * generating a packet with bad headers.
		 *
		 * The first inline segment's byte_count field doesn't
		 * need a barrier, because it comes after a
		 * control/MLX segment and therefore is at an offset
		 * of 16 mod 64.
		 */
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (header_size - spc));
		i = 2;
	}

	*mlx_seg_len =
		ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
	return 0;
}
static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
{
	unsigned cur;
	struct mlx4_ib_cq *cq;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max_post))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max_post;
}
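
/*
 * The second head/tail check runs under the CQ lock because wq->tail
 * is advanced from completion processing, which runs with that lock
 * held; taking it here gives an up-to-date view of the free space
 * before declaring the work queue full.
 */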
static __be32 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ?
		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC)       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ?
		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE) : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ?
		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ)  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_WRITE)  : 0) |
		cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ);
}
static void set_reg_seg(struct mlx4_wqe_fmr_seg *fseg,
			struct ib_reg_wr *wr)
{
	struct mlx4_ib_mr *mr = to_mmr(wr->mr);

	fseg->flags		= convert_access(wr->access);
	fseg->mem_key		= cpu_to_be32(wr->key);
	fseg->buf_list		= cpu_to_be64(mr->page_map);
	fseg->start_addr	= cpu_to_be64(mr->ibmr.iova);
	fseg->reg_len		= cpu_to_be64(mr->ibmr.length);
	fseg->offset		= 0; /* XXX -- is this just for ZBVA? */
	fseg->page_size		= cpu_to_be32(ilog2(mr->ibmr.page_size));
	fseg->reserved[0]	= 0;
	fseg->reserved[1]	= 0;
}
*iseg
, u32 rkey
)
3375 memset(iseg
, 0, sizeof(*iseg
));
3376 iseg
->mem_key
= cpu_to_be32(rkey
);
3379 static __always_inline
void set_raddr_seg(struct mlx4_wqe_raddr_seg
*rseg
,
3380 u64 remote_addr
, u32 rkey
)
3382 rseg
->raddr
= cpu_to_be64(remote_addr
);
3383 rseg
->rkey
= cpu_to_be32(rkey
);
3387 static void set_atomic_seg(struct mlx4_wqe_atomic_seg
*aseg
,
3388 struct ib_atomic_wr
*wr
)
3390 if (wr
->wr
.opcode
== IB_WR_ATOMIC_CMP_AND_SWP
) {
3391 aseg
->swap_add
= cpu_to_be64(wr
->swap
);
3392 aseg
->compare
= cpu_to_be64(wr
->compare_add
);
3393 } else if (wr
->wr
.opcode
== IB_WR_MASKED_ATOMIC_FETCH_AND_ADD
) {
3394 aseg
->swap_add
= cpu_to_be64(wr
->compare_add
);
3395 aseg
->compare
= cpu_to_be64(wr
->compare_add_mask
);
3397 aseg
->swap_add
= cpu_to_be64(wr
->compare_add
);
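
/*
 * For IB_WR_MASKED_ATOMIC_FETCH_AND_ADD the ordinary atomic segment
 * is reused: swap_add carries the add value and compare carries the
 * field-boundary mask, so no separate masked segment is needed for
 * that opcode.
 */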
static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
				  struct ib_atomic_wr *wr)
{
	aseg->swap_add		= cpu_to_be64(wr->swap);
	aseg->swap_add_mask	= cpu_to_be64(wr->swap_mask);
	aseg->compare		= cpu_to_be64(wr->compare_add);
	aseg->compare_mask	= cpu_to_be64(wr->compare_add_mask);
}
static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
			     struct ib_ud_wr *wr)
{
	memcpy(dseg->av, &to_mah(wr->ah)->av, sizeof (struct mlx4_av));
	dseg->dqpn = cpu_to_be32(wr->remote_qpn);
	dseg->qkey = cpu_to_be32(wr->remote_qkey);
	dseg->vlan = to_mah(wr->ah)->av.eth.vlan;
	memcpy(dseg->mac, to_mah(wr->ah)->av.eth.mac, 6);
}
static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev,
				    struct mlx4_wqe_datagram_seg *dseg,
				    struct ib_ud_wr *wr,
				    enum mlx4_ib_qp_type qpt)
{
	union mlx4_ext_av *av = &to_mah(wr->ah)->av;
	struct mlx4_av sqp_av = {0};
	int port = *((u8 *) &av->ib.port_pd) & 0x3;

	/* force loopback */
	sqp_av.port_pd = av->ib.port_pd | cpu_to_be32(0x80000000);
	sqp_av.g_slid = av->ib.g_slid & 0x7f; /* no GRH */
	sqp_av.sl_tclass_flowlabel = av->ib.sl_tclass_flowlabel &
			cpu_to_be32(0xf0000000);

	memcpy(dseg->av, &sqp_av, sizeof (struct mlx4_av));
	if (qpt == MLX4_IB_QPT_PROXY_GSI)
		dseg->dqpn = cpu_to_be32(dev->dev->caps.spec_qps[port - 1].qp1_tunnel);
	else
		dseg->dqpn = cpu_to_be32(dev->dev->caps.spec_qps[port - 1].qp0_tunnel);
	/* Use QKEY from the QP context, which is set by master */
	dseg->qkey = cpu_to_be32(IB_QP_SET_QKEY);
}
static void build_tunnel_header(struct ib_ud_wr *wr, void *wqe, unsigned *mlx_seg_len)
{
	struct mlx4_wqe_inline_seg *inl = wqe;
	struct mlx4_ib_tunnel_header hdr;
	struct mlx4_ib_ah *ah = to_mah(wr->ah);
	int spc;
	int i;

	memcpy(&hdr.av, &ah->av, sizeof hdr.av);
	hdr.remote_qpn = cpu_to_be32(wr->remote_qpn);
	hdr.pkey_index = cpu_to_be16(wr->pkey_index);
	hdr.qkey = cpu_to_be32(wr->remote_qkey);
	memcpy(hdr.mac, ah->av.eth.mac, 6);
	hdr.vlan = ah->av.eth.vlan;

	spc = MLX4_INLINE_ALIGN -
		((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
	if (sizeof (hdr) <= spc) {
		memcpy(inl + 1, &hdr, sizeof (hdr));
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | sizeof (hdr));
		i = 1;
	} else {
		memcpy(inl + 1, &hdr, spc);
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | spc);

		inl = (void *) (inl + 1) + spc;
		memcpy(inl + 1, (void *) &hdr + spc, sizeof (hdr) - spc);
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (sizeof (hdr) - spc));
		i = 2;
	}

	*mlx_seg_len =
		ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + sizeof (hdr), 16);
}
static void set_mlx_icrc_seg(void *dseg)
{
	u32 *t = dseg;
	struct mlx4_wqe_inline_seg *iseg = dseg;

	t[1] = 0;

	/*
	 * Need a barrier here before writing the byte_count field to
	 * make sure that all the data is visible before the
	 * byte_count field is set.  Otherwise, if the segment begins
	 * a new cacheline, the HCA prefetcher could grab the 64-byte
	 * chunk and get a valid (!= * 0xffffffff) byte count but
	 * stale data, and end up sending the wrong data.
	 */
	wmb();

	iseg->byte_count = cpu_to_be32((1 << 31) | 4);
}
static void set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->lkey       = cpu_to_be32(sg->lkey);
	dseg->addr       = cpu_to_be64(sg->addr);

	/*
	 * Need a barrier here before writing the byte_count field to
	 * make sure that all the data is visible before the
	 * byte_count field is set.  Otherwise, if the segment begins
	 * a new cacheline, the HCA prefetcher could grab the 64-byte
	 * chunk and get a valid (!= * 0xffffffff) byte count but
	 * stale data, and end up sending the wrong data.
	 */
	wmb();

	dseg->byte_count = cpu_to_be32(sg->length);
}
static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->byte_count = cpu_to_be32(sg->length);
	dseg->lkey       = cpu_to_be32(sg->lkey);
	dseg->addr       = cpu_to_be64(sg->addr);
}
static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_ud_wr *wr,
			 struct mlx4_ib_qp *qp, unsigned *lso_seg_len,
			 __be32 *lso_hdr_sz, __be32 *blh)
{
	unsigned halign = ALIGN(sizeof *wqe + wr->hlen, 16);

	if (unlikely(halign > MLX4_IB_CACHE_LINE_SIZE))
		*blh = cpu_to_be32(1 << 6);

	if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) &&
		     wr->wr.num_sge > qp->sq.max_gs - (halign >> 4)))
		return -EINVAL;

	memcpy(wqe->header, wr->header, wr->hlen);

	*lso_hdr_sz  = cpu_to_be32(wr->mss << 16 | wr->hlen);
	*lso_seg_len = halign;
	return 0;
}
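
/*
 * halign is the LSO segment (descriptor plus copied header) rounded
 * up to 16 bytes, the unit the WQE size field counts in, so
 * halign >> 4 is how many data-segment slots the header consumes;
 * *blh flags the HW when the segment spills past one 64-byte cache
 * line.
 */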
static __be32 send_ieth(struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return wr->ex.imm_data;

	case IB_WR_SEND_WITH_INV:
		return cpu_to_be32(wr->ex.invalidate_rkey);

	default:
		return 0;
	}
}
static void add_zero_len_inline(void *wqe)
{
	struct mlx4_wqe_inline_seg *inl = wqe;

	memset(wqe, 0, 16);
	inl->byte_count = cpu_to_be32(1 << 31);
}
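
/*
 * A zero-length inline segment is just the 4-byte header with the
 * inline bit (1 << 31) set and a byte count of 0; the proxy-QP0 send
 * path below uses one as padding so that the tunnel header that
 * follows starts on its own 16-byte slot.
 */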
static int _mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			      struct ib_send_wr **bad_wr, bool drain)
{
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	void *wqe;
	struct mlx4_wqe_ctrl_seg *ctrl;
	struct mlx4_wqe_data_seg *dseg;
	unsigned long flags;
	int nreq;
	int err = 0;
	unsigned ind;
	int uninitialized_var(stamp);
	int uninitialized_var(size);
	unsigned uninitialized_var(seglen);
	__be32 dummy;
	__be32 *lso_wqe;
	__be32 uninitialized_var(lso_hdr_sz);
	__be32 blh;
	int i;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);

	if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
		struct mlx4_ib_sqp *sqp = to_msqp(qp);

		if (sqp->roce_v2_gsi) {
			struct mlx4_ib_ah *ah = to_mah(ud_wr(wr)->ah);
			enum ib_gid_type gid_type;
			union ib_gid gid;

			if (!fill_gid_by_hw_index(mdev, sqp->qp.port,
						  ah->av.ib.gid_index,
						  &gid, &gid_type))
				qp = (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ?
						to_mqp(sqp->roce_v2_gsi) : qp;
			else
				pr_err("Failed to get gid at index %d. RoCEv2 will not work properly\n",
				       ah->av.ib.gid_index);
		}
	}

	spin_lock_irqsave(&qp->sq.lock, flags);
	if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR &&
	    !drain) {
		err = -EIO;
		*bad_wr = wr;
		nreq = 0;
		goto out;
	}

	ind = qp->sq_next_wqe;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		lso_wqe = &dummy;
		blh = 0;

		if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
		qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id;

		ctrl->srcrb_flags =
			(wr->send_flags & IB_SEND_SIGNALED ?
			 cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) : 0) |
			(wr->send_flags & IB_SEND_SOLICITED ?
			 cpu_to_be32(MLX4_WQE_CTRL_SOLICITED) : 0) |
			((wr->send_flags & IB_SEND_IP_CSUM) ?
			 cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
				     MLX4_WQE_CTRL_TCP_UDP_CSUM) : 0) |
			qp->sq_signal_bits;

		ctrl->imm = send_ieth(wr);

		wqe += sizeof *ctrl;
		size = sizeof *ctrl / 16;

		switch (qp->mlx4_ib_qp_type) {
		case MLX4_IB_QPT_RC:
		case MLX4_IB_QPT_UC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
			case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
				set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
					      atomic_wr(wr)->rkey);
				wqe  += sizeof (struct mlx4_wqe_raddr_seg);

				set_atomic_seg(wqe, atomic_wr(wr));
				wqe  += sizeof (struct mlx4_wqe_atomic_seg);

				size += (sizeof (struct mlx4_wqe_raddr_seg) +
					 sizeof (struct mlx4_wqe_atomic_seg)) / 16;

				break;

			case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
				set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
					      atomic_wr(wr)->rkey);
				wqe  += sizeof (struct mlx4_wqe_raddr_seg);

				set_masked_atomic_seg(wqe, atomic_wr(wr));
				wqe  += sizeof (struct mlx4_wqe_masked_atomic_seg);

				size += (sizeof (struct mlx4_wqe_raddr_seg) +
					 sizeof (struct mlx4_wqe_masked_atomic_seg)) / 16;

				break;

			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				wqe  += sizeof (struct mlx4_wqe_raddr_seg);
				size += sizeof (struct mlx4_wqe_raddr_seg) / 16;
				break;

			case IB_WR_LOCAL_INV:
				ctrl->srcrb_flags |=
					cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
				set_local_inv_seg(wqe, wr->ex.invalidate_rkey);
				wqe  += sizeof (struct mlx4_wqe_local_inval_seg);
				size += sizeof (struct mlx4_wqe_local_inval_seg) / 16;
				break;

			case IB_WR_REG_MR:
				ctrl->srcrb_flags |=
					cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
				set_reg_seg(wqe, reg_wr(wr));
				wqe  += sizeof(struct mlx4_wqe_fmr_seg);
				size += sizeof(struct mlx4_wqe_fmr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}
			break;

		case MLX4_IB_QPT_TUN_SMI_OWNER:
			err = build_sriov_qp0_header(to_msqp(qp), ud_wr(wr),
						     ctrl, &seglen);
			if (unlikely(err)) {
				*bad_wr = wr;
				goto out;
			}
			wqe  += seglen;
			size += seglen / 16;
			break;
		case MLX4_IB_QPT_TUN_SMI:
		case MLX4_IB_QPT_TUN_GSI:
			/* this is a UD qp used in MAD responses to slaves. */
			set_datagram_seg(wqe, ud_wr(wr));
			/* set the forced-loopback bit in the data seg av */
			*(__be32 *) wqe |= cpu_to_be32(0x80000000);
			wqe  += sizeof (struct mlx4_wqe_datagram_seg);
			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
			break;
		case MLX4_IB_QPT_UD:
			set_datagram_seg(wqe, ud_wr(wr));
			wqe  += sizeof (struct mlx4_wqe_datagram_seg);
			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;

			if (wr->opcode == IB_WR_LSO) {
				err = build_lso_seg(wqe, ud_wr(wr), qp, &seglen,
						    &lso_hdr_sz, &blh);
				if (unlikely(err)) {
					*bad_wr = wr;
					goto out;
				}
				lso_wqe = (__be32 *) wqe;
				wqe  += seglen;
				size += seglen / 16;
			}
			break;

		case MLX4_IB_QPT_PROXY_SMI_OWNER:
			err = build_sriov_qp0_header(to_msqp(qp), ud_wr(wr),
						     ctrl, &seglen);
			if (unlikely(err)) {
				*bad_wr = wr;
				goto out;
			}
			wqe += seglen;
			size += seglen / 16;
			/* to start tunnel header on a cache-line boundary */
			add_zero_len_inline(wqe);
			wqe += 16;
			size++;
			build_tunnel_header(ud_wr(wr), wqe, &seglen);
			wqe += seglen;
			size += seglen / 16;
			break;
		case MLX4_IB_QPT_PROXY_SMI:
		case MLX4_IB_QPT_PROXY_GSI:
			/* If we are tunneling special qps, this is a UD qp.
			 * In this case we first add a UD segment targeting
			 * the tunnel qp, and then add a header with address
			 * information */
			set_tunnel_datagram_seg(to_mdev(ibqp->device), wqe,
						ud_wr(wr),
						qp->mlx4_ib_qp_type);
			wqe  += sizeof (struct mlx4_wqe_datagram_seg);
			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
			build_tunnel_header(ud_wr(wr), wqe, &seglen);
			wqe += seglen;
			size += seglen / 16;
			break;

		case MLX4_IB_QPT_SMI:
		case MLX4_IB_QPT_GSI:
			err = build_mlx_header(to_msqp(qp), ud_wr(wr), ctrl,
					       &seglen);
			if (unlikely(err)) {
				*bad_wr = wr;
				goto out;
			}
			wqe  += seglen;
			size += seglen / 16;
			break;

		default:
			break;
		}

		/*
		 * Write data segments in reverse order, so as to
		 * overwrite cacheline stamp last within each
		 * cacheline.  This avoids issues with WQE
		 * prefetching.
		 */

		dseg = wqe;
		dseg += wr->num_sge - 1;
		size += wr->num_sge * (sizeof (struct mlx4_wqe_data_seg) / 16);

		/* Add one more inline data segment for ICRC for MLX sends */
		if (unlikely(qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI ||
			     qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI ||
			     qp->mlx4_ib_qp_type &
			     (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER))) {
			set_mlx_icrc_seg(dseg + 1);
			size += sizeof (struct mlx4_wqe_data_seg) / 16;
		}

		for (i = wr->num_sge - 1; i >= 0; --i, --dseg)
			set_data_seg(dseg, wr->sg_list + i);

		/*
		 * Possibly overwrite stamping in cacheline with LSO
		 * segment only after making sure all data segments
		 * are written.
		 */
		wmb();
		*lso_wqe = lso_hdr_sz;

		ctrl->qpn_vlan.fence_size = (wr->send_flags & IB_SEND_FENCE ?
					     MLX4_WQE_CTRL_FENCE : 0) | size;

		/*
		 * Make sure descriptor is fully written before
		 * setting ownership bit (because HW can start
		 * executing as soon as we do).
		 */
		wmb();

		if (wr->opcode < 0 || wr->opcode >= ARRAY_SIZE(mlx4_ib_opcode)) {
			*bad_wr = wr;
			err = -EINVAL;
			goto out;
		}

		ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] |
			(ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh;

		stamp = ind + qp->sq_spare_wqes;
		ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift);

		/*
		 * We can improve latency by not stamping the last
		 * send queue WQE until after ringing the doorbell, so
		 * only stamp here if there are still more WQEs to post.
		 *
		 * Same optimization applies to padding with NOP wqe
		 * in case of WQE shrinking (used to prevent wrap-around
		 * in the middle of WR).
		 */
		if (wr->next) {
			stamp_send_wqe(qp, stamp, size * 16);
			ind = pad_wraparound(qp, ind);
		}
	}

out:
	if (likely(nreq)) {
		qp->sq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		writel_relaxed(qp->doorbell_qpn,
			       to_mdev(ibqp->device)->uar_map + MLX4_SEND_DOORBELL);

		/*
		 * Make sure doorbells don't leak out of SQ spinlock
		 * and reach the HCA out of order.
		 */
		mmiowb();

		stamp_send_wqe(qp, stamp, size * 16);

		ind = pad_wraparound(qp, ind);
		qp->sq_next_wqe = ind;
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return err;
}
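
/*
 * Doorbell ordering in the post-send path: the wmb() before
 * owner_opcode publishes each descriptor to the HCA, the wmb() after
 * the loop orders the descriptors against the UAR write, and
 * mmiowb() keeps doorbells issued by different CPUs from reaching
 * the device out of order once the SQ spinlock is dropped.
 */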
int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr)
{
	return _mlx4_ib_post_send(ibqp, wr, bad_wr, false);
}
static int _mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			      struct ib_recv_wr **bad_wr, bool drain)
{
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	struct mlx4_wqe_data_seg *scat;
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int max_gs;
	int i;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);

	max_gs = qp->rq.max_gs;
	spin_lock_irqsave(&qp->rq.lock, flags);

	if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR &&
	    !drain) {
		err = -EIO;
		*bad_wr = wr;
		nreq = 0;
		goto out;
	}

	ind = qp->rq.head & (qp->rq.wqe_cnt - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		scat = get_recv_wqe(qp, ind);

		if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
		    MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
			ib_dma_sync_single_for_device(ibqp->device,
						      qp->sqp_proxy_rcv[ind].map,
						      sizeof (struct mlx4_ib_proxy_sqp_hdr),
						      DMA_FROM_DEVICE);
			scat->byte_count =
				cpu_to_be32(sizeof (struct mlx4_ib_proxy_sqp_hdr));
			/* use dma lkey from upper layer entry */
			scat->lkey = cpu_to_be32(wr->sg_list->lkey);
			scat->addr = cpu_to_be64(qp->sqp_proxy_rcv[ind].map);
			scat++;
			max_gs--;
		}

		for (i = 0; i < wr->num_sge; ++i)
			__set_data_seg(scat + i, wr->sg_list + i);

		if (i < max_gs) {
			scat[i].byte_count = 0;
			scat[i].lkey = cpu_to_be32(MLX4_INVALID_LKEY);
			scat[i].addr = 0;
		}

		qp->rq.wrid[ind] = wr->wr_id;

		ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
	}

out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);

	return err;
}
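
/*
 * Unlike the send side, receive posting has no UAR doorbell: the new
 * RQ head is made visible by updating the doorbell record in memory,
 * where only the low 16 bits of rq.head are meaningful.
 */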
int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	return _mlx4_ib_post_recv(ibqp, wr, bad_wr, false);
}
static inline enum ib_qp_state to_ib_qp_state(enum mlx4_qp_state mlx4_state)
{
	switch (mlx4_state) {
	case MLX4_QP_STATE_RST:      return IB_QPS_RESET;
	case MLX4_QP_STATE_INIT:     return IB_QPS_INIT;
	case MLX4_QP_STATE_RTR:      return IB_QPS_RTR;
	case MLX4_QP_STATE_RTS:      return IB_QPS_RTS;
	case MLX4_QP_STATE_SQ_DRAINING:
	case MLX4_QP_STATE_SQD:      return IB_QPS_SQD;
	case MLX4_QP_STATE_SQER:     return IB_QPS_SQE;
	case MLX4_QP_STATE_ERR:      return IB_QPS_ERR;
	default:                     return -1;
	}
}
static inline enum ib_mig_state to_ib_mig_state(int mlx4_mig_state)
{
	switch (mlx4_mig_state) {
	case MLX4_QP_PM_ARMED:		return IB_MIG_ARMED;
	case MLX4_QP_PM_REARM:		return IB_MIG_REARM;
	case MLX4_QP_PM_MIGRATED:	return IB_MIG_MIGRATED;
	default: return -1;
	}
}
static int to_ib_qp_access_flags(int mlx4_flags)
{
	int ib_flags = 0;

	if (mlx4_flags & MLX4_QP_BIT_RRE)
		ib_flags |= IB_ACCESS_REMOTE_READ;
	if (mlx4_flags & MLX4_QP_BIT_RWE)
		ib_flags |= IB_ACCESS_REMOTE_WRITE;
	if (mlx4_flags & MLX4_QP_BIT_RAE)
		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

	return ib_flags;
}
static void to_rdma_ah_attr(struct mlx4_ib_dev *ibdev,
			    struct rdma_ah_attr *ah_attr,
			    struct mlx4_qp_path *path)
{
	struct mlx4_dev *dev = ibdev->dev;
	u8 port_num = path->sched_queue & 0x40 ? 2 : 1;

	memset(ah_attr, 0, sizeof(*ah_attr));
	if (port_num == 0 || port_num > dev->caps.num_ports)
		return;
	ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, port_num);

	if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE)
		rdma_ah_set_sl(ah_attr, ((path->sched_queue >> 3) & 0x7) |
			       ((path->sched_queue & 4) << 1));
	else
		rdma_ah_set_sl(ah_attr, (path->sched_queue >> 2) & 0xf);
	rdma_ah_set_port_num(ah_attr, port_num);

	rdma_ah_set_dlid(ah_attr, be16_to_cpu(path->rlid));
	rdma_ah_set_path_bits(ah_attr, path->grh_mylmc & 0x7f);
	rdma_ah_set_static_rate(ah_attr,
				path->static_rate ? path->static_rate - 5 : 0);
	if (path->grh_mylmc & (1 << 7)) {
		rdma_ah_set_grh(ah_attr, NULL,
				be32_to_cpu(path->tclass_flowlabel) & 0xfffff,
				path->mgid_index,
				path->hop_limit,
				(be32_to_cpu(path->tclass_flowlabel)
				 >> 20) & 0xff);
		rdma_ah_set_dgid_raw(ah_attr, path->rgid);
	}
}
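
/*
 * sched_queue is also where the port lives when decoding a path:
 * bit 6 distinguishes port 1 from port 2, mirroring the
 * (port - 1) << 6 encoding used when the QP context was built.
 */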
int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr)
{
	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	struct mlx4_qp_context context;
	int mlx4_state;
	int err = 0;

	if (ibqp->rwq_ind_tbl)
		return -EOPNOTSUPP;

	mutex_lock(&qp->mutex);

	if (qp->state == IB_QPS_RESET) {
		qp_attr->qp_state = IB_QPS_RESET;
		goto done;
	}

	err = mlx4_qp_query(dev->dev, &qp->mqp, &context);
	if (err) {
		err = -EINVAL;
		goto out;
	}

	mlx4_state = be32_to_cpu(context.flags) >> 28;

	qp->state		     = to_ib_qp_state(mlx4_state);
	qp_attr->qp_state	     = qp->state;
	qp_attr->path_mtu	     = context.mtu_msgmax >> 5;
	qp_attr->path_mig_state	     =
		to_ib_mig_state((be32_to_cpu(context.flags) >> 11) & 0x3);
	qp_attr->qkey		     = be32_to_cpu(context.qkey);
	qp_attr->rq_psn		     = be32_to_cpu(context.rnr_nextrecvpsn) & 0xffffff;
	qp_attr->sq_psn		     = be32_to_cpu(context.next_send_psn) & 0xffffff;
	qp_attr->dest_qp_num	     = be32_to_cpu(context.remote_qpn) & 0xffffff;
	qp_attr->qp_access_flags     =
		to_ib_qp_access_flags(be32_to_cpu(context.params2));

	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
		to_rdma_ah_attr(dev, &qp_attr->ah_attr, &context.pri_path);
		to_rdma_ah_attr(dev, &qp_attr->alt_ah_attr, &context.alt_path);
		qp_attr->alt_pkey_index = context.alt_path.pkey_index & 0x7f;
		qp_attr->alt_port_num	=
			rdma_ah_get_port_num(&qp_attr->alt_ah_attr);
	}

	qp_attr->pkey_index = context.pri_path.pkey_index & 0x7f;
	if (qp_attr->qp_state == IB_QPS_INIT)
		qp_attr->port_num = qp->port;
	else
		qp_attr->port_num = context.pri_path.sched_queue & 0x40 ? 2 : 1;

	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
	qp_attr->sq_draining = mlx4_state == MLX4_QP_STATE_SQ_DRAINING;

	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context.params1) >> 21) & 0x7);

	qp_attr->max_dest_rd_atomic =
		1 << ((be32_to_cpu(context.params2) >> 21) & 0x7);
	qp_attr->min_rnr_timer	    =
		(be32_to_cpu(context.rnr_nextrecvpsn) >> 24) & 0x1f;
	qp_attr->timeout	    = context.pri_path.ackto >> 3;
	qp_attr->retry_cnt	    = (be32_to_cpu(context.params1) >> 16) & 0x7;
	qp_attr->rnr_retry	    = (be32_to_cpu(context.params1) >> 13) & 0x7;
	qp_attr->alt_timeout	    = context.alt_path.ackto >> 3;

done:
	qp_attr->cur_qp_state	     = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr     = qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;

	if (!ibqp->uobject) {
		qp_attr->cap.max_send_wr  = qp->sq.wqe_cnt;
		qp_attr->cap.max_send_sge = qp->sq.max_gs;
	} else {
		qp_attr->cap.max_send_wr  = 0;
		qp_attr->cap.max_send_sge = 0;
	}

	/*
	 * We don't support inline sends for kernel QPs (yet), and we
	 * don't know what userspace's value should be.
	 */
	qp_attr->cap.max_inline_data = 0;

	qp_init_attr->cap	     = qp_attr->cap;

	qp_init_attr->create_flags = 0;
	if (qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK)
		qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;

	if (qp->flags & MLX4_IB_QP_LSO)
		qp_init_attr->create_flags |= IB_QP_CREATE_IPOIB_UD_LSO;

	if (qp->flags & MLX4_IB_QP_NETIF)
		qp_init_attr->create_flags |= IB_QP_CREATE_NETIF_QP;

	qp_init_attr->sq_sig_type =
		qp->sq_signal_bits == cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) ?
		IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;

out:
	mutex_unlock(&qp->mutex);
	return err;
}
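
/*
 * WQs are implemented here as receive-only RAW_PACKET QPs: the WQ
 * attributes are translated into an ib_qp_init_attr (with the receive
 * CQ doubling as a dummy send CQ) and handed to create_qp_common() with
 * the MLX4_IB_RWQ_SRC source type.  Only user-space RQ WQs are
 * supported.
 */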
struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev;
	struct ib_qp_init_attr ib_qp_init_attr;
	struct mlx4_ib_qp *qp;
	struct mlx4_ib_create_wq ucmd;
	int err, required_cmd_sz;

	if (!(udata && pd->uobject))
		return ERR_PTR(-EINVAL);

	required_cmd_sz = offsetof(typeof(ucmd), comp_mask) +
			  sizeof(ucmd.comp_mask);
	if (udata->inlen < required_cmd_sz) {
		pr_debug("invalid inlen\n");
		return ERR_PTR(-EINVAL);
	}

	if (udata->inlen > sizeof(ucmd) &&
	    !ib_is_udata_cleared(udata, sizeof(ucmd),
				 udata->inlen - sizeof(ucmd))) {
		pr_debug("inlen is not supported\n");
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (udata->outlen)
		return ERR_PTR(-EOPNOTSUPP);

	dev = to_mdev(pd->device);

	if (init_attr->wq_type != IB_WQT_RQ) {
		pr_debug("unsupported wq type %d\n", init_attr->wq_type);
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (init_attr->create_flags & ~IB_WQ_FLAGS_SCATTER_FCS) {
		pr_debug("unsupported create_flags %u\n",
			 init_attr->create_flags);
		return ERR_PTR(-EOPNOTSUPP);
	}

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->pri.vid = 0xFFFF;
	qp->alt.vid = 0xFFFF;

	memset(&ib_qp_init_attr, 0, sizeof(ib_qp_init_attr));
	ib_qp_init_attr.qp_context = init_attr->wq_context;
	ib_qp_init_attr.qp_type = IB_QPT_RAW_PACKET;
	ib_qp_init_attr.cap.max_recv_wr = init_attr->max_wr;
	ib_qp_init_attr.cap.max_recv_sge = init_attr->max_sge;
	ib_qp_init_attr.recv_cq = init_attr->cq;
	ib_qp_init_attr.send_cq = ib_qp_init_attr.recv_cq; /* Dummy CQ */

	if (init_attr->create_flags & IB_WQ_FLAGS_SCATTER_FCS)
		ib_qp_init_attr.create_flags |= IB_QP_CREATE_SCATTER_FCS;

	err = create_qp_common(dev, pd, MLX4_IB_RWQ_SRC, &ib_qp_init_attr,
			       udata, 0, &qp);
	if (err) {
		kfree(qp);
		return ERR_PTR(err);
	}

	qp->ibwq.event_handler = init_attr->event_handler;
	qp->ibwq.wq_num = qp->mqp.qpn;
	qp->ibwq.state = IB_WQS_RESET;

	return &qp->ibwq;
}

static int ib_wq2qp_state(enum ib_wq_state state)
{
	switch (state) {
	case IB_WQS_RESET:
		return IB_QPS_RESET;
	case IB_WQS_RDY:
		return IB_QPS_RTR;
	default:
		return IB_QPS_ERR;
	}
}
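
/*
 * Apply a WQ state change to the underlying HW QP.  Reaching RDY from
 * RESET requires an intermediate RST->INIT transition carrying the port
 * number; if the final transition fails out of INIT, the HW QP is moved
 * back to RESET so that HW and SW state stay consistent.
 */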
static int _mlx4_ib_modify_wq(struct ib_wq *ibwq, enum ib_wq_state new_state)
{
	struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq);
	enum ib_qp_state qp_cur_state;
	enum ib_qp_state qp_new_state;
	int attr_mask;
	int err;

	/* ib_qp.state represents the WQ HW state while ib_wq.state represents
	 * the WQ logic state.
	 */
	qp_cur_state = qp->state;
	qp_new_state = ib_wq2qp_state(new_state);

	if (ib_wq2qp_state(new_state) == qp_cur_state)
		return 0;

	if (new_state == IB_WQS_RDY) {
		struct ib_qp_attr attr = {};

		attr.port_num = qp->port;
		attr_mask = IB_QP_PORT;

		err = __mlx4_ib_modify_qp(ibwq, MLX4_IB_RWQ_SRC, &attr,
					  attr_mask, IB_QPS_RESET, IB_QPS_INIT);
		if (err) {
			pr_debug("WQN=0x%06x failed to apply RST->INIT on the HW QP\n",
				 ibwq->wq_num);
			return err;
		}

		qp_cur_state = IB_QPS_INIT;
	}

	attr_mask = 0;
	err = __mlx4_ib_modify_qp(ibwq, MLX4_IB_RWQ_SRC, NULL, attr_mask,
				  qp_cur_state, qp_new_state);

	if (err && (qp_cur_state == IB_QPS_INIT)) {
		qp_new_state = IB_QPS_RESET;
		if (__mlx4_ib_modify_qp(ibwq, MLX4_IB_RWQ_SRC, NULL,
					attr_mask, IB_QPS_INIT, IB_QPS_RESET)) {
			pr_warn("WQN=0x%06x failed with reverting HW's resources failure\n",
				ibwq->wq_num);
			qp_new_state = IB_QPS_INIT;
		}
	}

	qp->state = qp_new_state;

	return err;
}
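
/*
 * Verbs entry point for WQ modification: validate the user command and
 * the requested state transition, then update the logical WQ state.
 * The HW QP is only touched while an RSS parent QP is attached, since
 * the port to program comes from that parent.
 */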
int mlx4_ib_modify_wq(struct ib_wq *ibwq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata)
{
	struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq);
	struct mlx4_ib_modify_wq ucmd = {};
	size_t required_cmd_sz;
	enum ib_wq_state cur_state, new_state;
	int err = 0;

	required_cmd_sz = offsetof(typeof(ucmd), reserved) +
			  sizeof(ucmd.reserved);
	if (udata->inlen < required_cmd_sz)
		return -EINVAL;

	if (udata->inlen > sizeof(ucmd) &&
	    !ib_is_udata_cleared(udata, sizeof(ucmd),
				 udata->inlen - sizeof(ucmd)))
		return -EOPNOTSUPP;

	if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)))
		return -EFAULT;

	if (ucmd.comp_mask || ucmd.reserved)
		return -EOPNOTSUPP;

	if (wq_attr_mask & IB_WQ_FLAGS)
		return -EOPNOTSUPP;

	cur_state = wq_attr_mask & IB_WQ_CUR_STATE ? wq_attr->curr_wq_state :
						     ibwq->state;
	new_state = wq_attr_mask & IB_WQ_STATE ? wq_attr->wq_state : cur_state;

	if (cur_state < IB_WQS_RESET || cur_state > IB_WQS_ERR ||
	    new_state < IB_WQS_RESET || new_state > IB_WQS_ERR)
		return -EINVAL;

	if ((new_state == IB_WQS_RDY) && (cur_state == IB_WQS_ERR))
		return -EINVAL;

	if ((new_state == IB_WQS_ERR) && (cur_state == IB_WQS_RESET))
		return -EINVAL;

	/* Need to protect against the parent RSS which also may modify WQ
	 * state.
	 */
	mutex_lock(&qp->mutex);

	/* Can update HW state only if a RSS QP has already associated to this
	 * WQ, so we can apply its port on the WQ.
	 */
	if (qp->rss_usecnt)
		err = _mlx4_ib_modify_wq(ibwq, new_state);

	if (!err)
		ibwq->state = new_state;

	mutex_unlock(&qp->mutex);

	return err;
}

int mlx4_ib_destroy_wq(struct ib_wq *ibwq)
{
	struct mlx4_ib_dev *dev = to_mdev(ibwq->device);
	struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq);

	if (qp->counter_index)
		mlx4_ib_free_qp_counter(dev, qp);

	destroy_qp_common(dev, qp, MLX4_IB_RWQ_SRC, 1);

	kfree(qp);

	return 0;
}
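
/*
 * The device requires the base WQN of an RSS indirection table to be
 * aligned to the table size and the member WQNs to be consecutive, so
 * only validation and allocation happen here; the table contents are
 * programmed when the parent RSS QP is created.
 */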
struct ib_rwq_ind_table
*mlx4_ib_create_rwq_ind_table(struct ib_device *device,
			      struct ib_rwq_ind_table_init_attr *init_attr,
			      struct ib_udata *udata)
{
	struct ib_rwq_ind_table *rwq_ind_table;
	struct mlx4_ib_create_rwq_ind_tbl_resp resp = {};
	unsigned int ind_tbl_size = 1 << init_attr->log_ind_tbl_size;
	unsigned int base_wqn;
	size_t min_resp_len;
	int i;
	int err;

	if (udata->inlen > 0 &&
	    !ib_is_udata_cleared(udata, 0,
				 udata->inlen))
		return ERR_PTR(-EOPNOTSUPP);

	min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
	if (udata->outlen && udata->outlen < min_resp_len)
		return ERR_PTR(-EINVAL);

	if (ind_tbl_size >
	    device->attrs.rss_caps.max_rwq_indirection_table_size) {
		pr_debug("log_ind_tbl_size = %d is bigger than supported = %d\n",
			 ind_tbl_size,
			 device->attrs.rss_caps.max_rwq_indirection_table_size);
		return ERR_PTR(-EINVAL);
	}

	base_wqn = init_attr->ind_tbl[0]->wq_num;

	if (base_wqn % ind_tbl_size) {
		pr_debug("WQN=0x%x isn't aligned with indirection table size\n",
			 base_wqn);
		return ERR_PTR(-EINVAL);
	}

	for (i = 1; i < ind_tbl_size; i++) {
		if (++base_wqn != init_attr->ind_tbl[i]->wq_num) {
			pr_debug("indirection table's WQNs aren't consecutive\n");
			return ERR_PTR(-EINVAL);
		}
	}

	rwq_ind_table = kzalloc(sizeof(*rwq_ind_table), GFP_KERNEL);
	if (!rwq_ind_table)
		return ERR_PTR(-ENOMEM);

	if (udata->outlen) {
		resp.response_length = offsetof(typeof(resp), response_length) +
					sizeof(resp.response_length);
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
		if (err)
			goto err;
	}

	return rwq_ind_table;

err:
	kfree(rwq_ind_table);
	return ERR_PTR(err);
}

int mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
	kfree(ib_rwq_ind_tbl);
	return 0;
}
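
/*
 * QP drain machinery: after the QP is moved to the error state, a
 * marker WR is posted and mlx4_ib_drain_qp_done() signals its flush
 * completion, which proves that every earlier WR has been flushed.
 */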
struct mlx4_ib_drain_cqe {
	struct ib_cqe cqe;
	struct completion done;
};

static void mlx4_ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct mlx4_ib_drain_cqe *cqe = container_of(wc->wr_cqe,
						     struct mlx4_ib_drain_cqe,
						     cqe);

	complete(&cqe->done);
}

/* This function returns only once the drained WR was completed */
static void handle_drain_completion(struct ib_cq *cq,
				    struct mlx4_ib_drain_cqe *sdrain,
				    struct mlx4_ib_dev *dev)
{
	struct mlx4_dev *mdev = dev->dev;

	if (cq->poll_ctx == IB_POLL_DIRECT) {
		while (wait_for_completion_timeout(&sdrain->done, HZ / 10) <= 0)
			ib_process_cq_direct(cq, -1);
		return;
	}

	if (mdev->persist->state == MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		struct mlx4_ib_cq *mcq = to_mcq(cq);
		bool triggered = false;
		unsigned long flags;

		spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
		/* Make sure that the CQ handler won't run if wasn't run yet */
		if (!mcq->mcq.reset_notify_added)
			mcq->mcq.reset_notify_added = 1;
		else
			triggered = true;
		spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);

		if (triggered) {
			/* Wait for any scheduled/running task to be ended */
			switch (cq->poll_ctx) {
			case IB_POLL_SOFTIRQ:
				irq_poll_disable(&cq->iop);
				irq_poll_enable(&cq->iop);
				break;
			case IB_POLL_WORKQUEUE:
				cancel_work_sync(&cq->work);
				break;
			default:
				WARN_ON_ONCE(1);
			}
		}

		/* Run the CQ handler - this makes sure that the drain WR will
		 * be processed if wasn't processed yet.
		 */
		mcq->mcq.comp(&mcq->mcq);
	}

	wait_for_completion(&sdrain->done);
}
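
/*
 * Drain the send queue: move the QP to ERR, post a signalled RDMA_WRITE
 * as a marker and wait for its flush completion.  A modify-QP failure
 * is tolerated while the device is in an internal error state, since
 * the reset flow will flush the CQ anyway.
 */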
void mlx4_ib_drain_sq(struct ib_qp *qp)
{
	struct ib_cq *cq = qp->send_cq;
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct mlx4_ib_drain_cqe sdrain;
	struct ib_send_wr *bad_swr;
	struct ib_rdma_wr swr = {
		.wr = {
			.next = NULL,
			{ .wr_cqe	= &sdrain.cqe, },
			.opcode	= IB_WR_RDMA_WRITE,
		},
	};
	int ret;
	struct mlx4_ib_dev *dev = to_mdev(qp->device);
	struct mlx4_dev *mdev = dev->dev;

	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret && mdev->persist->state != MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
		return;
	}

	sdrain.cqe.done = mlx4_ib_drain_qp_done;
	init_completion(&sdrain.done);

	ret = _mlx4_ib_post_send(qp, &swr.wr, &bad_swr, true);
	if (ret) {
		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
		return;
	}

	handle_drain_completion(cq, &sdrain, dev);
}
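
/*
 * Drain the receive queue: the mirror image of mlx4_ib_drain_sq(),
 * using a marker receive WR completed on the recv CQ.
 */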
void mlx4_ib_drain_rq(struct ib_qp *qp)
{
	struct ib_cq *cq = qp->recv_cq;
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct mlx4_ib_drain_cqe rdrain;
	struct ib_recv_wr rwr = {}, *bad_rwr;
	int ret;
	struct mlx4_ib_dev *dev = to_mdev(qp->device);
	struct mlx4_dev *mdev = dev->dev;

	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret && mdev->persist->state != MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
		return;
	}

	rwr.wr_cqe = &rdrain.cqe;
	rdrain.cqe.done = mlx4_ib_drain_qp_done;
	init_completion(&rdrain.done);

	ret = _mlx4_ib_post_recv(qp, &rwr, &bad_rwr, true);
	if (ret) {
		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
		return;
	}

	handle_drain_completion(cq, &rdrain, dev);
}