/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/log2.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/qp.h>

#include <rdma/mlx4-abi.h>
static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq,
			     struct mlx4_ib_cq *recv_cq);
static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq,
			       struct mlx4_ib_cq *recv_cq);
enum {
	MLX4_IB_ACK_REQ_FREQ	= 8,
};

enum {
	MLX4_IB_DEFAULT_SCHED_QUEUE	= 0x83,
	MLX4_IB_DEFAULT_QP0_SCHED_QUEUE	= 0x3f,
	MLX4_IB_LINK_TYPE_IB		= 0,
	MLX4_IB_LINK_TYPE_ETH		= 1
};

enum {
	/*
	 * Largest possible UD header: send with GRH and immediate
	 * data plus 18 bytes for an Ethernet header with VLAN/802.1Q
	 * tag.  (LRH would only use 8 bytes, so Ethernet is the
	 * biggest case)
	 */
	MLX4_IB_UD_HEADER_SIZE		= 82,
	MLX4_IB_LSO_HEADER_SPARE	= 128,
};

struct mlx4_ib_sqp {
	struct mlx4_ib_qp	qp;
	int			pkey_index;
	u32			qkey;
	u32			send_psn;
	struct ib_ud_header	ud_header;
	u8			header_buf[MLX4_IB_UD_HEADER_SIZE];
	struct ib_qp		*roce_v2_gsi;
};

enum {
	MLX4_IB_MIN_SQ_STRIDE	= 6,
	MLX4_IB_CACHE_LINE_SIZE	= 64,
};

enum {
	MLX4_RAW_QP_MSGMAX	= 31,
};
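/*
 * Illustrative note (not in the original source): with the usual
 * InfiniBand header sizes, the 82-byte figure above can be read as
 * 18 (Ethernet + VLAN tag) + 40 (GRH) + 12 (BTH) + 8 (DETH) + 4
 * (immediate data) = 82 bytes, versus 72 bytes if the 8-byte LRH is
 * used instead of the Ethernet header.
 */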
static const __be32 mlx4_ib_opcode[] = {
	[IB_WR_SEND]				= cpu_to_be32(MLX4_OPCODE_SEND),
	[IB_WR_LSO]				= cpu_to_be32(MLX4_OPCODE_LSO),
	[IB_WR_SEND_WITH_IMM]			= cpu_to_be32(MLX4_OPCODE_SEND_IMM),
	[IB_WR_RDMA_WRITE]			= cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
	[IB_WR_RDMA_WRITE_WITH_IMM]		= cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
	[IB_WR_RDMA_READ]			= cpu_to_be32(MLX4_OPCODE_RDMA_READ),
	[IB_WR_ATOMIC_CMP_AND_SWP]		= cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
	[IB_WR_ATOMIC_FETCH_AND_ADD]		= cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
	[IB_WR_SEND_WITH_INV]			= cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
	[IB_WR_LOCAL_INV]			= cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
	[IB_WR_REG_MR]				= cpu_to_be32(MLX4_OPCODE_FMR),
	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP]	= cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS),
	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]	= cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA),
};
static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
{
	return container_of(mqp, struct mlx4_ib_sqp, qp);
}
static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	if (!mlx4_is_master(dev->dev))
		return 0;

	return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn &&
	       qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn +
	       8 * MLX4_MFUNC_MAX;
}
static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	int proxy_sqp = 0;
	int real_sqp = 0;
	int i;

	/* PPF or Native -- real SQP */
	real_sqp = ((mlx4_is_master(dev->dev) || !mlx4_is_mfunc(dev->dev)) &&
		    qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&
		    qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3);
	if (real_sqp)
		return 1;
	/* VF or PF -- proxy SQP */
	if (mlx4_is_mfunc(dev->dev)) {
		for (i = 0; i < dev->dev->caps.num_ports; i++) {
			if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i] ||
			    qp->mqp.qpn == dev->dev->caps.qp1_proxy[i]) {
				proxy_sqp = 1;
				break;
			}
		}
	}
	if (proxy_sqp)
		return 1;

	return !!(qp->flags & MLX4_IB_ROCE_V2_GSI_QP);
}
/* used for INIT/CLOSE port logic */
static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	int proxy_qp0 = 0;
	int real_qp0 = 0;
	int i;

	/* PPF or Native -- real QP0 */
	real_qp0 = ((mlx4_is_master(dev->dev) || !mlx4_is_mfunc(dev->dev)) &&
		    qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&
		    qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 1);
	if (real_qp0)
		return 1;
	/* VF or PF -- proxy QP0 */
	if (mlx4_is_mfunc(dev->dev)) {
		for (i = 0; i < dev->dev->caps.num_ports; i++) {
			if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i]) {
				proxy_qp0 = 1;
				break;
			}
		}
	}
	return proxy_qp0;
}
static void *get_wqe(struct mlx4_ib_qp *qp, int offset)
{
	return mlx4_buf_offset(&qp->buf, offset);
}

static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
}

static void *get_send_wqe(struct mlx4_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift));
}
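/*
 * Illustrative note (not in the original source): each queue keeps its
 * entries at a fixed stride of 1 << wqe_shift bytes, so entry n of the
 * send queue starts at sq.offset + (n << sq.wqe_shift) within the QP
 * buffer; e.g. with wqe_shift == 6, WQE 3 lives at sq.offset + 192.
 */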
/*
 * Stamp a SQ WQE so that it is invalid if prefetched by marking the
 * first four bytes of every 64 byte chunk with
 *     0x7FFFFFF | (invalid_ownership_value << 31).
 *
 * When the max work request size is less than or equal to the WQE
 * basic block size, as an optimization, we can stamp all WQEs with
 * 0xffffffff, and skip the very first chunk of each WQE.
 */
static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size)
{
	__be32 *wqe;
	int i;
	int s;
	int ind;
	void *buf;
	__be32 stamp;
	struct mlx4_wqe_ctrl_seg *ctrl;

	if (qp->sq_max_wqes_per_wr > 1) {
		s = roundup(size, 1U << qp->sq.wqe_shift);
		for (i = 0; i < s; i += 64) {
			ind = (i >> qp->sq.wqe_shift) + n;
			stamp = ind & qp->sq.wqe_cnt ? cpu_to_be32(0x7fffffff) :
						       cpu_to_be32(0xffffffff);
			buf = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
			wqe = buf + (i & ((1 << qp->sq.wqe_shift) - 1));
			*wqe = stamp;
		}
	} else {
		ctrl = buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
		s = (ctrl->qpn_vlan.fence_size & 0x3f) << 4;
		for (i = 64; i < s; i += 64) {
			wqe = buf + i;
			*wqe = cpu_to_be32(0xffffffff);
		}
	}
}
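/*
 * Illustrative note (not in the original source): stamping writes a
 * known pattern into the first dword of every 64-byte chunk of a WQE,
 * with the polarity chosen from the queue wrap bit (ind & wqe_cnt), so
 * hardware that prefetches a not-yet-posted descriptor sees an
 * ownership value that cannot match the current pass over the ring.
 */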
static void post_nop_wqe(struct mlx4_ib_qp *qp, int n, int size)
{
	struct mlx4_wqe_ctrl_seg *ctrl;
	struct mlx4_wqe_inline_seg *inl;
	void *wqe;
	int s;

	ctrl = wqe = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
	s = sizeof(struct mlx4_wqe_ctrl_seg);

	if (qp->ibqp.qp_type == IB_QPT_UD) {
		struct mlx4_wqe_datagram_seg *dgram = wqe + sizeof *ctrl;
		struct mlx4_av *av = (struct mlx4_av *)dgram->av;
		memset(dgram, 0, sizeof *dgram);
		av->port_pd = cpu_to_be32((qp->port << 24) | to_mpd(qp->ibqp.pd)->pdn);
		s += sizeof(struct mlx4_wqe_datagram_seg);
	}

	/* Pad the remainder of the WQE with an inline data segment. */
	if (size > s) {
		inl = wqe + s;
		inl->byte_count = cpu_to_be32(1 << 31 | (size - s - sizeof *inl));
	}
	ctrl->srcrb_flags = 0;
	ctrl->qpn_vlan.fence_size = size / 16;
	/*
	 * Make sure descriptor is fully written before setting ownership bit
	 * (because HW can start executing as soon as we do).
	 */
	wmb();

	ctrl->owner_opcode = cpu_to_be32(MLX4_OPCODE_NOP | MLX4_WQE_CTRL_NEC) |
		(n & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0);

	stamp_send_wqe(qp, n + qp->sq_spare_wqes, size);
}
/* Post NOP WQE to prevent wrap-around in the middle of WR */
static inline unsigned pad_wraparound(struct mlx4_ib_qp *qp, int ind)
{
	unsigned s = qp->sq.wqe_cnt - (ind & (qp->sq.wqe_cnt - 1));
	if (unlikely(s < qp->sq_max_wqes_per_wr)) {
		post_nop_wqe(qp, ind, s << qp->sq.wqe_shift);
		ind += s;
	}
	return ind;
}
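/*
 * Illustrative note (not in the original source): with wqe_cnt = 64,
 * sq_max_wqes_per_wr = 4 and ind = 62, only s = 2 basic blocks remain
 * before the end of the ring, so a 2-block NOP is posted and the next
 * work request starts at index 64 (i.e. back at slot 0 after masking).
 */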
static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;

	if (type == MLX4_EVENT_TYPE_PATH_MIG)
		to_mibqp(qp)->port = to_mibqp(qp)->alt_port;

	if (ibqp->event_handler) {
		event.device     = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case MLX4_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case MLX4_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case MLX4_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			pr_warn("Unexpected event type %d on QP %06x\n",
				type, qp->qpn);
			return;
		}

		ibqp->event_handler(&event, ibqp->qp_context);
	}
}
static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags)
{
	/*
	 * UD WQEs must have a datagram segment.
	 * RC and UC WQEs might have a remote address segment.
	 * MLX WQEs need two extra inline data segments (for the UD
	 * header and space for the ICRC).
	 */
	switch (type) {
	case MLX4_IB_QPT_UD:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_datagram_seg) +
			((flags & MLX4_IB_QP_LSO) ? MLX4_IB_LSO_HEADER_SPARE : 0);
	case MLX4_IB_QPT_PROXY_SMI_OWNER:
	case MLX4_IB_QPT_PROXY_SMI:
	case MLX4_IB_QPT_PROXY_GSI:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_datagram_seg) + 64;
	case MLX4_IB_QPT_TUN_SMI_OWNER:
	case MLX4_IB_QPT_TUN_GSI:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_datagram_seg);

	case MLX4_IB_QPT_UC:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_raddr_seg);
	case MLX4_IB_QPT_RC:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_masked_atomic_seg) +
			sizeof (struct mlx4_wqe_raddr_seg);
	case MLX4_IB_QPT_SMI:
	case MLX4_IB_QPT_GSI:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			ALIGN(MLX4_IB_UD_HEADER_SIZE +
			      DIV_ROUND_UP(MLX4_IB_UD_HEADER_SIZE,
					   MLX4_INLINE_ALIGN) *
			      sizeof (struct mlx4_wqe_inline_seg),
			      sizeof (struct mlx4_wqe_data_seg)) +
			ALIGN(4 +
			      sizeof (struct mlx4_wqe_inline_seg),
			      sizeof (struct mlx4_wqe_data_seg));
	default:
		return sizeof (struct mlx4_wqe_ctrl_seg);
	}
}
static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
		       int is_user, int has_rq, struct mlx4_ib_qp *qp)
{
	/* Sanity check RQ size before proceeding */
	if (cap->max_recv_wr > dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE ||
	    cap->max_recv_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg))
		return -EINVAL;

	if (!has_rq) {
		if (cap->max_recv_wr)
			return -EINVAL;

		qp->rq.wqe_cnt = qp->rq.max_gs = 0;
	} else {
		/* HW requires >= 1 RQ entry with >= 1 gather entry */
		if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge))
			return -EINVAL;

		qp->rq.wqe_cnt	 = roundup_pow_of_two(max(1U, cap->max_recv_wr));
		qp->rq.max_gs	 = roundup_pow_of_two(max(1U, cap->max_recv_sge));
		qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg));
	}

	/* leave userspace return values as they were, so as not to break ABI */
	if (is_user) {
		cap->max_recv_wr  = qp->rq.max_post = qp->rq.wqe_cnt;
		cap->max_recv_sge = qp->rq.max_gs;
	} else {
		cap->max_recv_wr  = qp->rq.max_post =
			min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt);
		cap->max_recv_sge = min(qp->rq.max_gs,
					min(dev->dev->caps.max_sq_sg,
					    dev->dev->caps.max_rq_sg));
	}

	return 0;
}
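/*
 * Illustrative note (not in the original source): a caller asking for
 * max_recv_wr = 100 and max_recv_sge = 3 ends up with wqe_cnt = 128 and
 * max_gs = 4 (both rounded up to powers of two), and with a 16-byte
 * scatter entry the receive stride becomes ilog2(4 * 16) = 6, i.e. a
 * 64-byte receive WQE.
 */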
static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
			      enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp,
			      bool shrink_wqe)
{
	int s;

	/* Sanity check SQ size before proceeding */
	if (cap->max_send_wr  > (dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE) ||
	    cap->max_send_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg) ||
	    cap->max_inline_data + send_wqe_overhead(type, qp->flags) +
	    sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz)
		return -EINVAL;

	/*
	 * For MLX transport we need 2 extra S/G entries:
	 * one for the header and one for the checksum at the end
	 */
	if ((type == MLX4_IB_QPT_SMI || type == MLX4_IB_QPT_GSI ||
	     type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) &&
	    cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg)
		return -EINVAL;

	s = max(cap->max_send_sge * sizeof (struct mlx4_wqe_data_seg),
		cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) +
		send_wqe_overhead(type, qp->flags);

	if (s > dev->dev->caps.max_sq_desc_sz)
		return -EINVAL;

	/*
	 * Hermon supports shrinking WQEs, such that a single work
	 * request can include multiple units of 1 << wqe_shift.  This
	 * way, work requests can differ in size, and do not have to
	 * be a power of 2 in size, saving memory and speeding up send
	 * WR posting.  Unfortunately, if we do this then the
	 * wqe_index field in CQEs can't be used to look up the WR ID
	 * anymore, so we do this only if selective signaling is off.
	 *
	 * Further, on 32-bit platforms, we can't use vmap() to make
	 * the QP buffer virtually contiguous.  Thus we have to use
	 * constant-sized WRs to make sure a WR is always fully within
	 * a single page-sized chunk.
	 *
	 * Finally, we use NOP work requests to pad the end of the
	 * work queue, to avoid wrap-around in the middle of WR.  We
	 * set NEC bit to avoid getting completions with error for
	 * these NOP WRs, but since NEC is only supported starting
	 * with firmware 2.2.232, we use constant-sized WRs for older
	 * firmware.
	 *
	 * And, since MLX QPs only support SEND, we use constant-sized
	 * WRs in this case.
	 *
	 * We look for the smallest value of wqe_shift such that the
	 * resulting number of wqes does not exceed device
	 * capabilities.
	 *
	 * We set WQE size to at least 64 bytes, this way stamping
	 * invalidates each WQE.
	 */
	if (shrink_wqe && dev->dev->caps.fw_ver >= MLX4_FW_VER_WQE_CTRL_NEC &&
	    qp->sq_signal_bits && BITS_PER_LONG == 64 &&
	    type != MLX4_IB_QPT_SMI && type != MLX4_IB_QPT_GSI &&
	    !(type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_PROXY_SMI |
		      MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER)))
		qp->sq.wqe_shift = ilog2(64);
	else
		qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s));

	for (;;) {
		qp->sq_max_wqes_per_wr = DIV_ROUND_UP(s, 1U << qp->sq.wqe_shift);

		/*
		 * We need to leave 2 KB + 1 WR of headroom in the SQ to
		 * allow HW to prefetch.
		 */
		qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + qp->sq_max_wqes_per_wr;
		qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr *
						    qp->sq_max_wqes_per_wr +
						    qp->sq_spare_wqes);

		if (qp->sq.wqe_cnt <= dev->dev->caps.max_wqes)
			break;

		if (qp->sq_max_wqes_per_wr <= 1)
			return -EINVAL;

		++qp->sq.wqe_shift;
	}

	qp->sq.max_gs = (min(dev->dev->caps.max_sq_desc_sz,
			     (qp->sq_max_wqes_per_wr << qp->sq.wqe_shift)) -
			 send_wqe_overhead(type, qp->flags)) /
		sizeof (struct mlx4_wqe_data_seg);

	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
		(qp->sq.wqe_cnt << qp->sq.wqe_shift);
	if (qp->rq.wqe_shift > qp->sq.wqe_shift) {
		qp->rq.offset = 0;
		qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
	} else {
		qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift;
		qp->sq.offset = 0;
	}

	cap->max_send_wr  = qp->sq.max_post =
		(qp->sq.wqe_cnt - qp->sq_spare_wqes) / qp->sq_max_wqes_per_wr;
	cap->max_send_sge = min(qp->sq.max_gs,
				min(dev->dev->caps.max_sq_sg,
				    dev->dev->caps.max_rq_sg));
	/* We don't support inline sends for kernel QPs (yet) */
	cap->max_inline_data = 0;

	return 0;
}
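/*
 * Illustrative note (not in the original source): with a largest work
 * request of s = 200 bytes and the shrunken 64-byte stride, each WR
 * spans DIV_ROUND_UP(200, 64) = 4 basic blocks, the headroom becomes
 * (2048 >> 6) + 4 = 36 blocks, and a request for 128 sends rounds the
 * queue up to roundup_pow_of_two(128 * 4 + 36) = 1024 blocks.
 */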
static int set_user_sq_size(struct mlx4_ib_dev *dev,
			    struct mlx4_ib_qp *qp,
			    struct mlx4_ib_create_qp *ucmd)
{
	/* Sanity check SQ size before proceeding */
	if ((1 << ucmd->log_sq_bb_count) > dev->dev->caps.max_wqes	 ||
	    ucmd->log_sq_stride >
		ilog2(roundup_pow_of_two(dev->dev->caps.max_sq_desc_sz)) ||
	    ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE)
		return -EINVAL;

	qp->sq.wqe_cnt   = 1 << ucmd->log_sq_bb_count;
	qp->sq.wqe_shift = ucmd->log_sq_stride;

	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
		(qp->sq.wqe_cnt << qp->sq.wqe_shift);

	return 0;
}
static int alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
{
	int i;

	qp->sqp_proxy_rcv =
		kmalloc(sizeof (struct mlx4_ib_buf) * qp->rq.wqe_cnt,
			GFP_KERNEL);
	if (!qp->sqp_proxy_rcv)
		return -ENOMEM;
	for (i = 0; i < qp->rq.wqe_cnt; i++) {
		qp->sqp_proxy_rcv[i].addr =
			kmalloc(sizeof (struct mlx4_ib_proxy_sqp_hdr),
				GFP_KERNEL);
		if (!qp->sqp_proxy_rcv[i].addr)
			goto err;
		qp->sqp_proxy_rcv[i].map =
			ib_dma_map_single(dev, qp->sqp_proxy_rcv[i].addr,
					  sizeof (struct mlx4_ib_proxy_sqp_hdr),
					  DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(dev, qp->sqp_proxy_rcv[i].map)) {
			kfree(qp->sqp_proxy_rcv[i].addr);
			goto err;
		}
	}
	return 0;

err:
	while (i > 0) {
		--i;
		ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map,
				    sizeof (struct mlx4_ib_proxy_sqp_hdr),
				    DMA_FROM_DEVICE);
		kfree(qp->sqp_proxy_rcv[i].addr);
	}
	kfree(qp->sqp_proxy_rcv);
	qp->sqp_proxy_rcv = NULL;
	return -ENOMEM;
}
static void free_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
{
	int i;

	for (i = 0; i < qp->rq.wqe_cnt; i++) {
		ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map,
				    sizeof (struct mlx4_ib_proxy_sqp_hdr),
				    DMA_FROM_DEVICE);
		kfree(qp->sqp_proxy_rcv[i].addr);
	}
	kfree(qp->sqp_proxy_rcv);
}
static int qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI || attr->qp_type == IB_QPT_XRC_TGT)
		return 0;

	return !attr->srq;
}
static int qp0_enabled_vf(struct mlx4_dev *dev, int qpn)
{
	int i;

	for (i = 0; i < dev->caps.num_ports; i++) {
		if (qpn == dev->caps.qp0_proxy[i])
			return !!dev->caps.qp0_qkey[i];
	}
	return 0;
}
static void mlx4_ib_free_qp_counter(struct mlx4_ib_dev *dev,
				    struct mlx4_ib_qp *qp)
{
	mutex_lock(&dev->counters_table[qp->port - 1].mutex);
	mlx4_counter_free(dev->dev, qp->counter_index->index);
	list_del(&qp->counter_index->list);
	mutex_unlock(&dev->counters_table[qp->port - 1].mutex);

	kfree(qp->counter_index);
	qp->counter_index = NULL;
}
635 static int create_qp_common(struct mlx4_ib_dev
*dev
, struct ib_pd
*pd
,
636 struct ib_qp_init_attr
*init_attr
,
637 struct ib_udata
*udata
, int sqpn
, struct mlx4_ib_qp
**caller_qp
,
642 struct ib_qp_cap backup_cap
;
643 struct mlx4_ib_sqp
*sqp
= NULL
;
644 struct mlx4_ib_qp
*qp
;
645 enum mlx4_ib_qp_type qp_type
= (enum mlx4_ib_qp_type
) init_attr
->qp_type
;
646 struct mlx4_ib_cq
*mcq
;
649 /* When tunneling special qps, we use a plain UD qp */
651 if (mlx4_is_mfunc(dev
->dev
) &&
652 (!mlx4_is_master(dev
->dev
) ||
653 !(init_attr
->create_flags
& MLX4_IB_SRIOV_SQP
))) {
654 if (init_attr
->qp_type
== IB_QPT_GSI
)
655 qp_type
= MLX4_IB_QPT_PROXY_GSI
;
657 if (mlx4_is_master(dev
->dev
) ||
658 qp0_enabled_vf(dev
->dev
, sqpn
))
659 qp_type
= MLX4_IB_QPT_PROXY_SMI_OWNER
;
661 qp_type
= MLX4_IB_QPT_PROXY_SMI
;
665 /* add extra sg entry for tunneling */
666 init_attr
->cap
.max_recv_sge
++;
667 } else if (init_attr
->create_flags
& MLX4_IB_SRIOV_TUNNEL_QP
) {
668 struct mlx4_ib_qp_tunnel_init_attr
*tnl_init
=
669 container_of(init_attr
,
670 struct mlx4_ib_qp_tunnel_init_attr
, init_attr
);
671 if ((tnl_init
->proxy_qp_type
!= IB_QPT_SMI
&&
672 tnl_init
->proxy_qp_type
!= IB_QPT_GSI
) ||
673 !mlx4_is_master(dev
->dev
))
675 if (tnl_init
->proxy_qp_type
== IB_QPT_GSI
)
676 qp_type
= MLX4_IB_QPT_TUN_GSI
;
677 else if (tnl_init
->slave
== mlx4_master_func_num(dev
->dev
) ||
678 mlx4_vf_smi_enabled(dev
->dev
, tnl_init
->slave
,
680 qp_type
= MLX4_IB_QPT_TUN_SMI_OWNER
;
682 qp_type
= MLX4_IB_QPT_TUN_SMI
;
683 /* we are definitely in the PPF here, since we are creating
684 * tunnel QPs. base_tunnel_sqpn is therefore valid. */
685 qpn
= dev
->dev
->phys_caps
.base_tunnel_sqpn
+ 8 * tnl_init
->slave
686 + tnl_init
->proxy_qp_type
* 2 + tnl_init
->port
- 1;
691 if (qp_type
== MLX4_IB_QPT_SMI
|| qp_type
== MLX4_IB_QPT_GSI
||
692 (qp_type
& (MLX4_IB_QPT_PROXY_SMI
| MLX4_IB_QPT_PROXY_SMI_OWNER
|
693 MLX4_IB_QPT_PROXY_GSI
| MLX4_IB_QPT_TUN_SMI_OWNER
))) {
694 sqp
= kzalloc(sizeof (struct mlx4_ib_sqp
), gfp
);
698 qp
->pri
.vid
= 0xFFFF;
699 qp
->alt
.vid
= 0xFFFF;
701 qp
= kzalloc(sizeof (struct mlx4_ib_qp
), gfp
);
704 qp
->pri
.vid
= 0xFFFF;
705 qp
->alt
.vid
= 0xFFFF;
710 qp
->mlx4_ib_qp_type
= qp_type
;
712 mutex_init(&qp
->mutex
);
713 spin_lock_init(&qp
->sq
.lock
);
714 spin_lock_init(&qp
->rq
.lock
);
715 INIT_LIST_HEAD(&qp
->gid_list
);
716 INIT_LIST_HEAD(&qp
->steering_rules
);
718 qp
->state
= IB_QPS_RESET
;
719 if (init_attr
->sq_sig_type
== IB_SIGNAL_ALL_WR
)
720 qp
->sq_signal_bits
= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE
);
722 err
= set_rq_size(dev
, &init_attr
->cap
, !!pd
->uobject
, qp_has_rq(init_attr
), qp
);
727 struct mlx4_ib_create_qp ucmd
;
729 if (ib_copy_from_udata(&ucmd
, udata
, sizeof ucmd
)) {
734 qp
->sq_no_prefetch
= ucmd
.sq_no_prefetch
;
736 err
= set_user_sq_size(dev
, qp
, &ucmd
);
740 qp
->umem
= ib_umem_get(pd
->uobject
->context
, ucmd
.buf_addr
,
742 if (IS_ERR(qp
->umem
)) {
743 err
= PTR_ERR(qp
->umem
);
747 err
= mlx4_mtt_init(dev
->dev
, ib_umem_page_count(qp
->umem
),
748 qp
->umem
->page_shift
, &qp
->mtt
);
752 err
= mlx4_ib_umem_write_mtt(dev
, &qp
->mtt
, qp
->umem
);
756 if (qp_has_rq(init_attr
)) {
757 err
= mlx4_ib_db_map_user(to_mucontext(pd
->uobject
->context
),
758 ucmd
.db_addr
, &qp
->db
);
763 qp
->sq_no_prefetch
= 0;
765 if (init_attr
->create_flags
& IB_QP_CREATE_IPOIB_UD_LSO
)
766 qp
->flags
|= MLX4_IB_QP_LSO
;
768 if (init_attr
->create_flags
& IB_QP_CREATE_NETIF_QP
) {
769 if (dev
->steering_support
==
770 MLX4_STEERING_MODE_DEVICE_MANAGED
)
771 qp
->flags
|= MLX4_IB_QP_NETIF
;
776 memcpy(&backup_cap
, &init_attr
->cap
, sizeof(backup_cap
));
777 err
= set_kernel_sq_size(dev
, &init_attr
->cap
,
782 if (qp_has_rq(init_attr
)) {
783 err
= mlx4_db_alloc(dev
->dev
, &qp
->db
, 0, gfp
);
790 if (mlx4_buf_alloc(dev
->dev
, qp
->buf_size
, qp
->buf_size
,
792 memcpy(&init_attr
->cap
, &backup_cap
,
794 err
= set_kernel_sq_size(dev
, &init_attr
->cap
, qp_type
,
799 if (mlx4_buf_alloc(dev
->dev
, qp
->buf_size
,
800 PAGE_SIZE
* 2, &qp
->buf
, gfp
)) {
806 err
= mlx4_mtt_init(dev
->dev
, qp
->buf
.npages
, qp
->buf
.page_shift
,
811 err
= mlx4_buf_write_mtt(dev
->dev
, &qp
->mtt
, &qp
->buf
, gfp
);
815 qp
->sq
.wrid
= kmalloc_array(qp
->sq
.wqe_cnt
, sizeof(u64
),
818 qp
->sq
.wrid
= __vmalloc(qp
->sq
.wqe_cnt
* sizeof(u64
),
820 qp
->rq
.wrid
= kmalloc_array(qp
->rq
.wqe_cnt
, sizeof(u64
),
823 qp
->rq
.wrid
= __vmalloc(qp
->rq
.wqe_cnt
* sizeof(u64
),
825 if (!qp
->sq
.wrid
|| !qp
->rq
.wrid
) {
832 if (qp
->mlx4_ib_qp_type
& (MLX4_IB_QPT_PROXY_SMI_OWNER
|
833 MLX4_IB_QPT_PROXY_SMI
| MLX4_IB_QPT_PROXY_GSI
)) {
834 if (alloc_proxy_bufs(pd
->device
, qp
)) {
840 /* Raw packet QPNs may not have bits 6,7 set in their qp_num;
841 * otherwise, the WQE BlueFlame setup flow wrongly causes
843 if (init_attr
->qp_type
== IB_QPT_RAW_PACKET
)
844 err
= mlx4_qp_reserve_range(dev
->dev
, 1, 1, &qpn
,
845 (init_attr
->cap
.max_send_wr
?
846 MLX4_RESERVE_ETH_BF_QP
: 0) |
847 (init_attr
->cap
.max_recv_wr
?
848 MLX4_RESERVE_A0_QP
: 0));
850 if (qp
->flags
& MLX4_IB_QP_NETIF
)
851 err
= mlx4_ib_steer_qp_alloc(dev
, 1, &qpn
);
853 err
= mlx4_qp_reserve_range(dev
->dev
, 1, 1,
859 if (init_attr
->create_flags
& IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK
)
860 qp
->flags
|= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK
;
862 err
= mlx4_qp_alloc(dev
->dev
, qpn
, &qp
->mqp
, gfp
);
866 if (init_attr
->qp_type
== IB_QPT_XRC_TGT
)
867 qp
->mqp
.qpn
|= (1 << 23);
870 * Hardware wants QPN written in big-endian order (after
871 * shifting) for send doorbell. Precompute this value to save
872 * a little bit when posting sends.
874 qp
->doorbell_qpn
= swab32(qp
->mqp
.qpn
<< 8);
876 qp
->mqp
.event
= mlx4_ib_qp_event
;
880 spin_lock_irqsave(&dev
->reset_flow_resource_lock
, flags
);
881 mlx4_ib_lock_cqs(to_mcq(init_attr
->send_cq
),
882 to_mcq(init_attr
->recv_cq
));
883 /* Maintain device to QPs access, needed for further handling
886 list_add_tail(&qp
->qps_list
, &dev
->qp_list
);
887 /* Maintain CQ to QPs access, needed for further handling
890 mcq
= to_mcq(init_attr
->send_cq
);
891 list_add_tail(&qp
->cq_send_list
, &mcq
->send_qp_list
);
892 mcq
= to_mcq(init_attr
->recv_cq
);
893 list_add_tail(&qp
->cq_recv_list
, &mcq
->recv_qp_list
);
894 mlx4_ib_unlock_cqs(to_mcq(init_attr
->send_cq
),
895 to_mcq(init_attr
->recv_cq
));
896 spin_unlock_irqrestore(&dev
->reset_flow_resource_lock
, flags
);
901 if (qp
->flags
& MLX4_IB_QP_NETIF
)
902 mlx4_ib_steer_qp_free(dev
, qpn
, 1);
904 mlx4_qp_release_range(dev
->dev
, qpn
, 1);
907 if (qp
->mlx4_ib_qp_type
== MLX4_IB_QPT_PROXY_GSI
)
908 free_proxy_bufs(pd
->device
, qp
);
911 if (qp_has_rq(init_attr
))
912 mlx4_ib_db_unmap_user(to_mucontext(pd
->uobject
->context
), &qp
->db
);
919 mlx4_mtt_cleanup(dev
->dev
, &qp
->mtt
);
923 ib_umem_release(qp
->umem
);
925 mlx4_buf_free(dev
->dev
, qp
->buf_size
, &qp
->buf
);
928 if (!pd
->uobject
&& qp_has_rq(init_attr
))
929 mlx4_db_free(dev
->dev
, &qp
->db
);
934 else if (!*caller_qp
)
static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:	return MLX4_QP_STATE_RST;
	case IB_QPS_INIT:	return MLX4_QP_STATE_INIT;
	case IB_QPS_RTR:	return MLX4_QP_STATE_RTR;
	case IB_QPS_RTS:	return MLX4_QP_STATE_RTS;
	case IB_QPS_SQD:	return MLX4_QP_STATE_SQD;
	case IB_QPS_SQE:	return MLX4_QP_STATE_SQER;
	case IB_QPS_ERR:	return MLX4_QP_STATE_ERR;
	default:		return -1;
	}
}
static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		spin_lock(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
		spin_lock(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}
static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
	__releases(&send_cq->lock) __releases(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock(&send_cq->lock);
	} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock(&recv_cq->lock);
	}
}
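/*
 * Illustrative note (not in the original source): when the send and
 * receive CQs differ, both helpers take the two spinlocks in a
 * consistent order (the CQ with the lower cqn first) and release them
 * in the reverse order, so two threads working on the same pair of CQs
 * from different QPs cannot deadlock against each other.
 */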
static void del_gid_entries(struct mlx4_ib_qp *qp)
{
	struct mlx4_ib_gid_entry *ge, *tmp;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		list_del(&ge->list);
		kfree(ge);
	}
}
static struct mlx4_ib_pd *get_pd(struct mlx4_ib_qp *qp)
{
	if (qp->ibqp.qp_type == IB_QPT_XRC_TGT)
		return to_mpd(to_mxrcd(qp->ibqp.xrcd)->pd);
	else
		return to_mpd(qp->ibqp.pd);
}
static void get_cqs(struct mlx4_ib_qp *qp,
		    struct mlx4_ib_cq **send_cq, struct mlx4_ib_cq **recv_cq)
{
	switch (qp->ibqp.qp_type) {
	case IB_QPT_XRC_TGT:
		*send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq);
		*recv_cq = *send_cq;
		break;
	case IB_QPT_XRC_INI:
		*send_cq = to_mcq(qp->ibqp.send_cq);
		*recv_cq = *send_cq;
		break;
	default:
		*send_cq = to_mcq(qp->ibqp.send_cq);
		*recv_cq = to_mcq(qp->ibqp.recv_cq);
		break;
	}
}
1020 static void destroy_qp_common(struct mlx4_ib_dev
*dev
, struct mlx4_ib_qp
*qp
,
1023 struct mlx4_ib_cq
*send_cq
, *recv_cq
;
1024 unsigned long flags
;
1026 if (qp
->state
!= IB_QPS_RESET
) {
1027 if (mlx4_qp_modify(dev
->dev
, NULL
, to_mlx4_state(qp
->state
),
1028 MLX4_QP_STATE_RST
, NULL
, 0, 0, &qp
->mqp
))
1029 pr_warn("modify QP %06x to RESET failed.\n",
1031 if (qp
->pri
.smac
|| (!qp
->pri
.smac
&& qp
->pri
.smac_port
)) {
1032 mlx4_unregister_mac(dev
->dev
, qp
->pri
.smac_port
, qp
->pri
.smac
);
1034 qp
->pri
.smac_port
= 0;
1037 mlx4_unregister_mac(dev
->dev
, qp
->alt
.smac_port
, qp
->alt
.smac
);
1040 if (qp
->pri
.vid
< 0x1000) {
1041 mlx4_unregister_vlan(dev
->dev
, qp
->pri
.vlan_port
, qp
->pri
.vid
);
1042 qp
->pri
.vid
= 0xFFFF;
1043 qp
->pri
.candidate_vid
= 0xFFFF;
1044 qp
->pri
.update_vid
= 0;
1046 if (qp
->alt
.vid
< 0x1000) {
1047 mlx4_unregister_vlan(dev
->dev
, qp
->alt
.vlan_port
, qp
->alt
.vid
);
1048 qp
->alt
.vid
= 0xFFFF;
1049 qp
->alt
.candidate_vid
= 0xFFFF;
1050 qp
->alt
.update_vid
= 0;
1054 get_cqs(qp
, &send_cq
, &recv_cq
);
1056 spin_lock_irqsave(&dev
->reset_flow_resource_lock
, flags
);
1057 mlx4_ib_lock_cqs(send_cq
, recv_cq
);
1059 /* del from lists under both locks above to protect reset flow paths */
1060 list_del(&qp
->qps_list
);
1061 list_del(&qp
->cq_send_list
);
1062 list_del(&qp
->cq_recv_list
);
1064 __mlx4_ib_cq_clean(recv_cq
, qp
->mqp
.qpn
,
1065 qp
->ibqp
.srq
? to_msrq(qp
->ibqp
.srq
): NULL
);
1066 if (send_cq
!= recv_cq
)
1067 __mlx4_ib_cq_clean(send_cq
, qp
->mqp
.qpn
, NULL
);
1070 mlx4_qp_remove(dev
->dev
, &qp
->mqp
);
1072 mlx4_ib_unlock_cqs(send_cq
, recv_cq
);
1073 spin_unlock_irqrestore(&dev
->reset_flow_resource_lock
, flags
);
1075 mlx4_qp_free(dev
->dev
, &qp
->mqp
);
1077 if (!is_sqp(dev
, qp
) && !is_tunnel_qp(dev
, qp
)) {
1078 if (qp
->flags
& MLX4_IB_QP_NETIF
)
1079 mlx4_ib_steer_qp_free(dev
, qp
->mqp
.qpn
, 1);
1081 mlx4_qp_release_range(dev
->dev
, qp
->mqp
.qpn
, 1);
1084 mlx4_mtt_cleanup(dev
->dev
, &qp
->mtt
);
1088 mlx4_ib_db_unmap_user(to_mucontext(qp
->ibqp
.uobject
->context
),
1090 ib_umem_release(qp
->umem
);
1092 kvfree(qp
->sq
.wrid
);
1093 kvfree(qp
->rq
.wrid
);
1094 if (qp
->mlx4_ib_qp_type
& (MLX4_IB_QPT_PROXY_SMI_OWNER
|
1095 MLX4_IB_QPT_PROXY_SMI
| MLX4_IB_QPT_PROXY_GSI
))
1096 free_proxy_bufs(&dev
->ib_dev
, qp
);
1097 mlx4_buf_free(dev
->dev
, qp
->buf_size
, &qp
->buf
);
1099 mlx4_db_free(dev
->dev
, &qp
->db
);
1102 del_gid_entries(qp
);
static u32 get_sqp_num(struct mlx4_ib_dev *dev, struct ib_qp_init_attr *attr)
{
	if (!mlx4_is_mfunc(dev->dev) ||
	    (mlx4_is_master(dev->dev) &&
	     attr->create_flags & MLX4_IB_SRIOV_SQP)) {
		return dev->dev->phys_caps.base_sqpn +
			(attr->qp_type == IB_QPT_SMI ? 0 : 2) +
			attr->port_num - 1;
	}
	/* PF or VF -- creating proxies */
	if (attr->qp_type == IB_QPT_SMI)
		return dev->dev->caps.qp0_proxy[attr->port_num - 1];
	else
		return dev->dev->caps.qp1_proxy[attr->port_num - 1];
}
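/*
 * Illustrative note (not in the original source): on a native device
 * (or the owning PPF) the special QP numbers are laid out per port
 * starting at base_sqpn, so port 1 gets base_sqpn + 0 (SMI/QP0) and
 * base_sqpn + 2 (GSI/QP1), while port 2 gets base_sqpn + 1 and
 * base_sqpn + 3; virtual functions use the per-port proxy QPs instead.
 */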
1122 static struct ib_qp
*_mlx4_ib_create_qp(struct ib_pd
*pd
,
1123 struct ib_qp_init_attr
*init_attr
,
1124 struct ib_udata
*udata
)
1126 struct mlx4_ib_qp
*qp
= NULL
;
1128 int sup_u_create_flags
= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK
;
1132 gfp
= (init_attr
->create_flags
& MLX4_IB_QP_CREATE_USE_GFP_NOIO
) ?
1133 GFP_NOIO
: GFP_KERNEL
;
1135 * We only support LSO, vendor flag1, and multicast loopback blocking,
1136 * and only for kernel UD QPs.
1138 if (init_attr
->create_flags
& ~(MLX4_IB_QP_LSO
|
1139 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK
|
1140 MLX4_IB_SRIOV_TUNNEL_QP
|
1143 MLX4_IB_QP_CREATE_ROCE_V2_GSI
|
1144 MLX4_IB_QP_CREATE_USE_GFP_NOIO
))
1145 return ERR_PTR(-EINVAL
);
1147 if (init_attr
->create_flags
& IB_QP_CREATE_NETIF_QP
) {
1148 if (init_attr
->qp_type
!= IB_QPT_UD
)
1149 return ERR_PTR(-EINVAL
);
1152 if (init_attr
->create_flags
) {
1153 if (udata
&& init_attr
->create_flags
& ~(sup_u_create_flags
))
1154 return ERR_PTR(-EINVAL
);
1156 if ((init_attr
->create_flags
& ~(MLX4_IB_SRIOV_SQP
|
1157 MLX4_IB_QP_CREATE_USE_GFP_NOIO
|
1158 MLX4_IB_QP_CREATE_ROCE_V2_GSI
|
1159 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK
) &&
1160 init_attr
->qp_type
!= IB_QPT_UD
) ||
1161 (init_attr
->create_flags
& MLX4_IB_SRIOV_SQP
&&
1162 init_attr
->qp_type
> IB_QPT_GSI
) ||
1163 (init_attr
->create_flags
& MLX4_IB_QP_CREATE_ROCE_V2_GSI
&&
1164 init_attr
->qp_type
!= IB_QPT_GSI
))
1165 return ERR_PTR(-EINVAL
);
1168 switch (init_attr
->qp_type
) {
1169 case IB_QPT_XRC_TGT
:
1170 pd
= to_mxrcd(init_attr
->xrcd
)->pd
;
1171 xrcdn
= to_mxrcd(init_attr
->xrcd
)->xrcdn
;
1172 init_attr
->send_cq
= to_mxrcd(init_attr
->xrcd
)->cq
;
1174 case IB_QPT_XRC_INI
:
1175 if (!(to_mdev(pd
->device
)->dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_XRC
))
1176 return ERR_PTR(-ENOSYS
);
1177 init_attr
->recv_cq
= init_attr
->send_cq
;
1181 case IB_QPT_RAW_PACKET
:
1182 qp
= kzalloc(sizeof *qp
, gfp
);
1184 return ERR_PTR(-ENOMEM
);
1185 qp
->pri
.vid
= 0xFFFF;
1186 qp
->alt
.vid
= 0xFFFF;
1190 err
= create_qp_common(to_mdev(pd
->device
), pd
, init_attr
,
1191 udata
, 0, &qp
, gfp
);
1194 return ERR_PTR(err
);
1197 qp
->ibqp
.qp_num
= qp
->mqp
.qpn
;
1207 /* Userspace is not allowed to create special QPs: */
1209 return ERR_PTR(-EINVAL
);
1210 if (init_attr
->create_flags
& MLX4_IB_QP_CREATE_ROCE_V2_GSI
) {
1211 int res
= mlx4_qp_reserve_range(to_mdev(pd
->device
)->dev
, 1, 1, &sqpn
, 0);
1214 return ERR_PTR(res
);
1216 sqpn
= get_sqp_num(to_mdev(pd
->device
), init_attr
);
1219 err
= create_qp_common(to_mdev(pd
->device
), pd
, init_attr
, udata
,
1223 return ERR_PTR(err
);
1225 qp
->port
= init_attr
->port_num
;
1226 qp
->ibqp
.qp_num
= init_attr
->qp_type
== IB_QPT_SMI
? 0 :
1227 init_attr
->create_flags
& MLX4_IB_QP_CREATE_ROCE_V2_GSI
? sqpn
: 1;
1231 /* Don't support raw QPs */
1232 return ERR_PTR(-EINVAL
);
1238 struct ib_qp
*mlx4_ib_create_qp(struct ib_pd
*pd
,
1239 struct ib_qp_init_attr
*init_attr
,
1240 struct ib_udata
*udata
) {
1241 struct ib_device
*device
= pd
? pd
->device
: init_attr
->xrcd
->device
;
1243 struct mlx4_ib_dev
*dev
= to_mdev(device
);
1245 ibqp
= _mlx4_ib_create_qp(pd
, init_attr
, udata
);
1247 if (!IS_ERR(ibqp
) &&
1248 (init_attr
->qp_type
== IB_QPT_GSI
) &&
1249 !(init_attr
->create_flags
& MLX4_IB_QP_CREATE_ROCE_V2_GSI
)) {
1250 struct mlx4_ib_sqp
*sqp
= to_msqp((to_mqp(ibqp
)));
1251 int is_eth
= rdma_cap_eth_ah(&dev
->ib_dev
, init_attr
->port_num
);
1254 dev
->dev
->caps
.flags2
& MLX4_DEV_CAP_FLAG2_ROCE_V1_V2
) {
1255 init_attr
->create_flags
|= MLX4_IB_QP_CREATE_ROCE_V2_GSI
;
1256 sqp
->roce_v2_gsi
= ib_create_qp(pd
, init_attr
);
1258 if (IS_ERR(sqp
->roce_v2_gsi
)) {
1259 pr_err("Failed to create GSI QP for RoCEv2 (%ld)\n", PTR_ERR(sqp
->roce_v2_gsi
));
1260 sqp
->roce_v2_gsi
= NULL
;
1262 sqp
= to_msqp(to_mqp(sqp
->roce_v2_gsi
));
1263 sqp
->qp
.flags
|= MLX4_IB_ROCE_V2_GSI_QP
;
1266 init_attr
->create_flags
&= ~MLX4_IB_QP_CREATE_ROCE_V2_GSI
;
static int _mlx4_ib_destroy_qp(struct ib_qp *qp)
{
	struct mlx4_ib_dev *dev = to_mdev(qp->device);
	struct mlx4_ib_qp *mqp = to_mqp(qp);
	struct mlx4_ib_pd *pd;

	if (is_qp0(dev, mqp))
		mlx4_CLOSE_PORT(dev->dev, mqp->port);

	if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI &&
	    dev->qp1_proxy[mqp->port - 1] == mqp) {
		mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]);
		dev->qp1_proxy[mqp->port - 1] = NULL;
		mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]);
	}

	if (mqp->counter_index)
		mlx4_ib_free_qp_counter(dev, mqp);

	pd = get_pd(mqp);
	destroy_qp_common(dev, mqp, !!pd->ibpd.uobject);

	if (is_sqp(dev, mqp))
		kfree(to_msqp(mqp));
	else
		kfree(mqp);

	return 0;
}
int mlx4_ib_destroy_qp(struct ib_qp *qp)
{
	struct mlx4_ib_qp *mqp = to_mqp(qp);

	if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
		struct mlx4_ib_sqp *sqp = to_msqp(mqp);

		if (sqp->roce_v2_gsi)
			ib_destroy_qp(sqp->roce_v2_gsi);
	}

	return _mlx4_ib_destroy_qp(qp);
}
static int to_mlx4_st(struct mlx4_ib_dev *dev, enum mlx4_ib_qp_type type)
{
	switch (type) {
	case MLX4_IB_QPT_RC:		return MLX4_QP_ST_RC;
	case MLX4_IB_QPT_UC:		return MLX4_QP_ST_UC;
	case MLX4_IB_QPT_UD:		return MLX4_QP_ST_UD;
	case MLX4_IB_QPT_XRC_INI:
	case MLX4_IB_QPT_XRC_TGT:	return MLX4_QP_ST_XRC;
	case MLX4_IB_QPT_SMI:
	case MLX4_IB_QPT_GSI:
	case MLX4_IB_QPT_RAW_PACKET:	return MLX4_QP_ST_MLX;

	case MLX4_IB_QPT_PROXY_SMI_OWNER:
	case MLX4_IB_QPT_TUN_SMI_OWNER:	return (mlx4_is_mfunc(dev->dev) ?
						MLX4_QP_ST_MLX : -1);
	case MLX4_IB_QPT_PROXY_SMI:
	case MLX4_IB_QPT_TUN_SMI:
	case MLX4_IB_QPT_PROXY_GSI:
	case MLX4_IB_QPT_TUN_GSI:	return (mlx4_is_mfunc(dev->dev) ?
						MLX4_QP_ST_UD : -1);
	default:			return -1;
	}
}
static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr,
				   int attr_mask)
{
	u8 dest_rd_atomic;
	u32 access_flags;
	u32 hw_access_flags = 0;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MLX4_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= MLX4_QP_BIT_RAE;
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MLX4_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}
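/*
 * Illustrative note (not in the original source): if the destination
 * cannot accept any RDMA read or atomic responses (dest_rd_atomic ==
 * 0), the remote-read and remote-atomic bits are masked away before
 * translation, so e.g. IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ
 * collapses to just MLX4_QP_BIT_RWE in the QP context.
 */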
static void store_sqp_attrs(struct mlx4_ib_sqp *sqp, const struct ib_qp_attr *attr,
			    int attr_mask)
{
	if (attr_mask & IB_QP_PKEY_INDEX)
		sqp->pkey_index = attr->pkey_index;
	if (attr_mask & IB_QP_QKEY)
		sqp->qkey = attr->qkey;
	if (attr_mask & IB_QP_SQ_PSN)
		sqp->send_psn = attr->sq_psn;
}
static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
{
	path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6);
}
1386 static int _mlx4_set_path(struct mlx4_ib_dev
*dev
,
1387 const struct rdma_ah_attr
*ah
,
1388 u64 smac
, u16 vlan_tag
, struct mlx4_qp_path
*path
,
1389 struct mlx4_roce_smac_vlan_info
*smac_info
, u8 port
)
1395 path
->grh_mylmc
= rdma_ah_get_path_bits(ah
) & 0x7f;
1396 path
->rlid
= cpu_to_be16(rdma_ah_get_dlid(ah
));
1397 if (rdma_ah_get_static_rate(ah
)) {
1398 path
->static_rate
= rdma_ah_get_static_rate(ah
) +
1399 MLX4_STAT_RATE_OFFSET
;
1400 while (path
->static_rate
> IB_RATE_2_5_GBPS
+ MLX4_STAT_RATE_OFFSET
&&
1401 !(1 << path
->static_rate
& dev
->dev
->caps
.stat_rate_support
))
1402 --path
->static_rate
;
1404 path
->static_rate
= 0;
1406 if (rdma_ah_get_ah_flags(ah
) & IB_AH_GRH
) {
1407 const struct ib_global_route
*grh
= rdma_ah_read_grh(ah
);
1408 int real_sgid_index
=
1409 mlx4_ib_gid_index_to_real_index(dev
, port
,
1412 if (real_sgid_index
>= dev
->dev
->caps
.gid_table_len
[port
]) {
1413 pr_err("sgid_index (%u) too large. max is %d\n",
1414 real_sgid_index
, dev
->dev
->caps
.gid_table_len
[port
] - 1);
1418 path
->grh_mylmc
|= 1 << 7;
1419 path
->mgid_index
= real_sgid_index
;
1420 path
->hop_limit
= grh
->hop_limit
;
1421 path
->tclass_flowlabel
=
1422 cpu_to_be32((grh
->traffic_class
<< 20) |
1424 memcpy(path
->rgid
, grh
->dgid
.raw
, 16);
1427 if (ah
->type
== RDMA_AH_ATTR_TYPE_ROCE
) {
1428 if (!(rdma_ah_get_ah_flags(ah
) & IB_AH_GRH
))
1431 path
->sched_queue
= MLX4_IB_DEFAULT_SCHED_QUEUE
|
1432 ((port
- 1) << 6) | ((rdma_ah_get_sl(ah
) & 7) << 3);
1434 path
->feup
|= MLX4_FEUP_FORCE_ETH_UP
;
1435 if (vlan_tag
< 0x1000) {
1436 if (smac_info
->vid
< 0x1000) {
1437 /* both valid vlan ids */
1438 if (smac_info
->vid
!= vlan_tag
) {
1439 /* different VIDs. unreg old and reg new */
1440 err
= mlx4_register_vlan(dev
->dev
, port
, vlan_tag
, &vidx
);
1443 smac_info
->candidate_vid
= vlan_tag
;
1444 smac_info
->candidate_vlan_index
= vidx
;
1445 smac_info
->candidate_vlan_port
= port
;
1446 smac_info
->update_vid
= 1;
1447 path
->vlan_index
= vidx
;
1449 path
->vlan_index
= smac_info
->vlan_index
;
1452 /* no current vlan tag in qp */
1453 err
= mlx4_register_vlan(dev
->dev
, port
, vlan_tag
, &vidx
);
1456 smac_info
->candidate_vid
= vlan_tag
;
1457 smac_info
->candidate_vlan_index
= vidx
;
1458 smac_info
->candidate_vlan_port
= port
;
1459 smac_info
->update_vid
= 1;
1460 path
->vlan_index
= vidx
;
1462 path
->feup
|= MLX4_FVL_FORCE_ETH_VLAN
;
1465 /* have current vlan tag. unregister it at modify-qp success */
1466 if (smac_info
->vid
< 0x1000) {
1467 smac_info
->candidate_vid
= 0xFFFF;
1468 smac_info
->update_vid
= 1;
1472 /* get smac_index for RoCE use.
1473 * If no smac was yet assigned, register one.
1474 * If one was already assigned, but the new mac differs,
1475 * unregister the old one and register the new one.
1477 if ((!smac_info
->smac
&& !smac_info
->smac_port
) ||
1478 smac_info
->smac
!= smac
) {
1479 /* register candidate now, unreg if needed, after success */
1480 smac_index
= mlx4_register_mac(dev
->dev
, port
, smac
);
1481 if (smac_index
>= 0) {
1482 smac_info
->candidate_smac_index
= smac_index
;
1483 smac_info
->candidate_smac
= smac
;
1484 smac_info
->candidate_smac_port
= port
;
1489 smac_index
= smac_info
->smac_index
;
1491 memcpy(path
->dmac
, ah
->roce
.dmac
, 6);
1492 path
->ackto
= MLX4_IB_LINK_TYPE_ETH
;
1493 /* put MAC table smac index for IBoE */
1494 path
->grh_mylmc
= (u8
) (smac_index
) | 0x80;
1496 path
->sched_queue
= MLX4_IB_DEFAULT_SCHED_QUEUE
|
1497 ((port
- 1) << 6) | ((rdma_ah_get_sl(ah
) & 0xf) << 2);
static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp,
			 enum ib_qp_attr_mask qp_attr_mask,
			 struct mlx4_ib_qp *mqp,
			 struct mlx4_qp_path *path, u8 port,
			 u16 vlan_id, u8 *smac)
{
	return _mlx4_set_path(dev, &qp->ah_attr,
			      mlx4_mac_to_u64(smac),
			      vlan_id,
			      path, &mqp->pri, port);
}
static int mlx4_set_alt_path(struct mlx4_ib_dev *dev,
			     const struct ib_qp_attr *qp,
			     enum ib_qp_attr_mask qp_attr_mask,
			     struct mlx4_ib_qp *mqp,
			     struct mlx4_qp_path *path, u8 port)
{
	return _mlx4_set_path(dev, &qp->alt_ah_attr,
			      0,
			      0xffff,
			      path, &mqp->alt, port);
}
static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	struct mlx4_ib_gid_entry *ge, *tmp;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		if (!ge->added && mlx4_ib_add_mc(dev, qp, &ge->gid)) {
			ge->added = 1;
			ge->port = qp->port;
		}
	}
}
static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev,
				    struct mlx4_ib_qp *qp,
				    struct mlx4_qp_context *context)
{
	u64 u64_mac;
	int smac_index;

	u64_mac = atomic64_read(&dev->iboe.mac[qp->port - 1]);

	context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6);
	if (!qp->pri.smac && !qp->pri.smac_port) {
		smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac);
		if (smac_index >= 0) {
			qp->pri.candidate_smac_index = smac_index;
			qp->pri.candidate_smac = u64_mac;
			qp->pri.candidate_smac_port = qp->port;
			context->pri_path.grh_mylmc = 0x80 | (u8) smac_index;
		} else {
			return -ENOENT;
		}
	}
	return 0;
}
1563 static int create_qp_lb_counter(struct mlx4_ib_dev
*dev
, struct mlx4_ib_qp
*qp
)
1565 struct counter_index
*new_counter_index
;
1569 if (rdma_port_get_link_layer(&dev
->ib_dev
, qp
->port
) !=
1570 IB_LINK_LAYER_ETHERNET
||
1571 !(qp
->flags
& MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK
) ||
1572 !(dev
->dev
->caps
.flags2
& MLX4_DEV_CAP_FLAG2_LB_SRC_CHK
))
1575 err
= mlx4_counter_alloc(dev
->dev
, &tmp_idx
);
1579 new_counter_index
= kmalloc(sizeof(*new_counter_index
), GFP_KERNEL
);
1580 if (!new_counter_index
) {
1581 mlx4_counter_free(dev
->dev
, tmp_idx
);
1585 new_counter_index
->index
= tmp_idx
;
1586 new_counter_index
->allocated
= 1;
1587 qp
->counter_index
= new_counter_index
;
1589 mutex_lock(&dev
->counters_table
[qp
->port
- 1].mutex
);
1590 list_add_tail(&new_counter_index
->list
,
1591 &dev
->counters_table
[qp
->port
- 1].counters_list
);
1592 mutex_unlock(&dev
->counters_table
[qp
->port
- 1].mutex
);
enum {
	MLX4_QPC_ROCE_MODE_1 = 0,
	MLX4_QPC_ROCE_MODE_2 = 2,
	MLX4_QPC_ROCE_MODE_UNDEFINED = 0xff
};

static u8 gid_type_to_qpc(enum ib_gid_type gid_type)
{
	switch (gid_type) {
	case IB_GID_TYPE_ROCE:
		return MLX4_QPC_ROCE_MODE_1;
	case IB_GID_TYPE_ROCE_UDP_ENCAP:
		return MLX4_QPC_ROCE_MODE_2;
	default:
		return MLX4_QPC_ROCE_MODE_UNDEFINED;
	}
}
1615 static int __mlx4_ib_modify_qp(struct ib_qp
*ibqp
,
1616 const struct ib_qp_attr
*attr
, int attr_mask
,
1617 enum ib_qp_state cur_state
, enum ib_qp_state new_state
)
1619 struct mlx4_ib_dev
*dev
= to_mdev(ibqp
->device
);
1620 struct mlx4_ib_qp
*qp
= to_mqp(ibqp
);
1621 struct mlx4_ib_pd
*pd
;
1622 struct mlx4_ib_cq
*send_cq
, *recv_cq
;
1623 struct mlx4_qp_context
*context
;
1624 enum mlx4_qp_optpar optpar
= 0;
1630 /* APM is not supported under RoCE */
1631 if (attr_mask
& IB_QP_ALT_PATH
&&
1632 rdma_port_get_link_layer(&dev
->ib_dev
, qp
->port
) ==
1633 IB_LINK_LAYER_ETHERNET
)
1636 context
= kzalloc(sizeof *context
, GFP_KERNEL
);
1640 context
->flags
= cpu_to_be32((to_mlx4_state(new_state
) << 28) |
1641 (to_mlx4_st(dev
, qp
->mlx4_ib_qp_type
) << 16));
1643 if (!(attr_mask
& IB_QP_PATH_MIG_STATE
))
1644 context
->flags
|= cpu_to_be32(MLX4_QP_PM_MIGRATED
<< 11);
1646 optpar
|= MLX4_QP_OPTPAR_PM_STATE
;
1647 switch (attr
->path_mig_state
) {
1648 case IB_MIG_MIGRATED
:
1649 context
->flags
|= cpu_to_be32(MLX4_QP_PM_MIGRATED
<< 11);
1652 context
->flags
|= cpu_to_be32(MLX4_QP_PM_REARM
<< 11);
1655 context
->flags
|= cpu_to_be32(MLX4_QP_PM_ARMED
<< 11);
1660 if (ibqp
->qp_type
== IB_QPT_GSI
|| ibqp
->qp_type
== IB_QPT_SMI
)
1661 context
->mtu_msgmax
= (IB_MTU_4096
<< 5) | 11;
1662 else if (ibqp
->qp_type
== IB_QPT_RAW_PACKET
)
1663 context
->mtu_msgmax
= (MLX4_RAW_QP_MTU
<< 5) | MLX4_RAW_QP_MSGMAX
;
1664 else if (ibqp
->qp_type
== IB_QPT_UD
) {
1665 if (qp
->flags
& MLX4_IB_QP_LSO
)
1666 context
->mtu_msgmax
= (IB_MTU_4096
<< 5) |
1667 ilog2(dev
->dev
->caps
.max_gso_sz
);
1669 context
->mtu_msgmax
= (IB_MTU_4096
<< 5) | 12;
1670 } else if (attr_mask
& IB_QP_PATH_MTU
) {
1671 if (attr
->path_mtu
< IB_MTU_256
|| attr
->path_mtu
> IB_MTU_4096
) {
1672 pr_err("path MTU (%u) is invalid\n",
1676 context
->mtu_msgmax
= (attr
->path_mtu
<< 5) |
1677 ilog2(dev
->dev
->caps
.max_msg_sz
);
1681 context
->rq_size_stride
= ilog2(qp
->rq
.wqe_cnt
) << 3;
1682 context
->rq_size_stride
|= qp
->rq
.wqe_shift
- 4;
1685 context
->sq_size_stride
= ilog2(qp
->sq
.wqe_cnt
) << 3;
1686 context
->sq_size_stride
|= qp
->sq
.wqe_shift
- 4;
1688 if (new_state
== IB_QPS_RESET
&& qp
->counter_index
)
1689 mlx4_ib_free_qp_counter(dev
, qp
);
1691 if (cur_state
== IB_QPS_RESET
&& new_state
== IB_QPS_INIT
) {
1692 context
->sq_size_stride
|= !!qp
->sq_no_prefetch
<< 7;
1693 context
->xrcd
= cpu_to_be32((u32
) qp
->xrcdn
);
1694 if (ibqp
->qp_type
== IB_QPT_RAW_PACKET
)
1695 context
->param3
|= cpu_to_be32(1 << 30);
1698 if (qp
->ibqp
.uobject
)
1699 context
->usr_page
= cpu_to_be32(
1700 mlx4_to_hw_uar_index(dev
->dev
,
1701 to_mucontext(ibqp
->uobject
->context
)->uar
.index
));
1703 context
->usr_page
= cpu_to_be32(
1704 mlx4_to_hw_uar_index(dev
->dev
, dev
->priv_uar
.index
));
1706 if (attr_mask
& IB_QP_DEST_QPN
)
1707 context
->remote_qpn
= cpu_to_be32(attr
->dest_qp_num
);
1709 if (attr_mask
& IB_QP_PORT
) {
1710 if (cur_state
== IB_QPS_SQD
&& new_state
== IB_QPS_SQD
&&
1711 !(attr_mask
& IB_QP_AV
)) {
1712 mlx4_set_sched(&context
->pri_path
, attr
->port_num
);
1713 optpar
|= MLX4_QP_OPTPAR_SCHED_QUEUE
;
1717 if (cur_state
== IB_QPS_INIT
&& new_state
== IB_QPS_RTR
) {
1718 err
= create_qp_lb_counter(dev
, qp
);
1723 dev
->counters_table
[qp
->port
- 1].default_counter
;
1724 if (qp
->counter_index
)
1725 counter_index
= qp
->counter_index
->index
;
1727 if (counter_index
!= -1) {
1728 context
->pri_path
.counter_index
= counter_index
;
1729 optpar
|= MLX4_QP_OPTPAR_COUNTER_INDEX
;
1730 if (qp
->counter_index
) {
1731 context
->pri_path
.fl
|=
1732 MLX4_FL_ETH_SRC_CHECK_MC_LB
;
1733 context
->pri_path
.vlan_control
|=
1734 MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER
;
1737 context
->pri_path
.counter_index
=
1738 MLX4_SINK_COUNTER_INDEX(dev
->dev
);
1740 if (qp
->flags
& MLX4_IB_QP_NETIF
) {
1741 mlx4_ib_steer_qp_reg(dev
, qp
, 1);
1745 if (ibqp
->qp_type
== IB_QPT_GSI
) {
1746 enum ib_gid_type gid_type
= qp
->flags
& MLX4_IB_ROCE_V2_GSI_QP
?
1747 IB_GID_TYPE_ROCE_UDP_ENCAP
: IB_GID_TYPE_ROCE
;
1748 u8 qpc_roce_mode
= gid_type_to_qpc(gid_type
);
1750 context
->rlkey_roce_mode
|= (qpc_roce_mode
<< 6);
1754 if (attr_mask
& IB_QP_PKEY_INDEX
) {
1755 if (qp
->mlx4_ib_qp_type
& MLX4_IB_QPT_ANY_SRIOV
)
1756 context
->pri_path
.disable_pkey_check
= 0x40;
1757 context
->pri_path
.pkey_index
= attr
->pkey_index
;
1758 optpar
|= MLX4_QP_OPTPAR_PKEY_INDEX
;
1761 if (attr_mask
& IB_QP_AV
) {
1762 u8 port_num
= mlx4_is_bonded(to_mdev(ibqp
->device
)->dev
) ? 1 :
1763 attr_mask
& IB_QP_PORT
? attr
->port_num
: qp
->port
;
1765 struct ib_gid_attr gid_attr
= {.gid_type
= IB_GID_TYPE_IB
};
1770 rdma_cap_eth_ah(&dev
->ib_dev
, port_num
) &&
1771 rdma_ah_get_ah_flags(&attr
->ah_attr
) & IB_AH_GRH
;
1775 rdma_ah_read_grh(&attr
->ah_attr
)->sgid_index
;
1777 status
= ib_get_cached_gid(ibqp
->device
, port_num
,
1778 index
, &gid
, &gid_attr
);
1779 if (!status
&& !memcmp(&gid
, &zgid
, sizeof(gid
)))
1781 if (!status
&& gid_attr
.ndev
) {
1782 vlan
= rdma_vlan_dev_vlan_id(gid_attr
.ndev
);
1783 memcpy(smac
, gid_attr
.ndev
->dev_addr
, ETH_ALEN
);
1784 dev_put(gid_attr
.ndev
);
1790 if (mlx4_set_path(dev
, attr
, attr_mask
, qp
, &context
->pri_path
,
1791 port_num
, vlan
, smac
))
1794 optpar
|= (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH
|
1795 MLX4_QP_OPTPAR_SCHED_QUEUE
);
1798 (cur_state
== IB_QPS_INIT
&& new_state
== IB_QPS_RTR
)) {
1799 u8 qpc_roce_mode
= gid_type_to_qpc(gid_attr
.gid_type
);
1801 if (qpc_roce_mode
== MLX4_QPC_ROCE_MODE_UNDEFINED
) {
1805 context
->rlkey_roce_mode
|= (qpc_roce_mode
<< 6);
1810 if (attr_mask
& IB_QP_TIMEOUT
) {
1811 context
->pri_path
.ackto
|= attr
->timeout
<< 3;
1812 optpar
|= MLX4_QP_OPTPAR_ACK_TIMEOUT
;
1815 if (attr_mask
& IB_QP_ALT_PATH
) {
1816 if (attr
->alt_port_num
== 0 ||
1817 attr
->alt_port_num
> dev
->dev
->caps
.num_ports
)
1820 if (attr
->alt_pkey_index
>=
1821 dev
->dev
->caps
.pkey_table_len
[attr
->alt_port_num
])
1824 if (mlx4_set_alt_path(dev
, attr
, attr_mask
, qp
,
1826 attr
->alt_port_num
))
1829 context
->alt_path
.pkey_index
= attr
->alt_pkey_index
;
1830 context
->alt_path
.ackto
= attr
->alt_timeout
<< 3;
1831 optpar
|= MLX4_QP_OPTPAR_ALT_ADDR_PATH
;
1835 get_cqs(qp
, &send_cq
, &recv_cq
);
1836 context
->pd
= cpu_to_be32(pd
->pdn
);
1837 context
->cqn_send
= cpu_to_be32(send_cq
->mcq
.cqn
);
1838 context
->cqn_recv
= cpu_to_be32(recv_cq
->mcq
.cqn
);
1839 context
->params1
= cpu_to_be32(MLX4_IB_ACK_REQ_FREQ
<< 28);
1841 /* Set "fast registration enabled" for all kernel QPs */
1842 if (!qp
->ibqp
.uobject
)
1843 context
->params1
|= cpu_to_be32(1 << 11);
1845 if (attr_mask
& IB_QP_RNR_RETRY
) {
1846 context
->params1
|= cpu_to_be32(attr
->rnr_retry
<< 13);
1847 optpar
|= MLX4_QP_OPTPAR_RNR_RETRY
;
1850 if (attr_mask
& IB_QP_RETRY_CNT
) {
1851 context
->params1
|= cpu_to_be32(attr
->retry_cnt
<< 16);
1852 optpar
|= MLX4_QP_OPTPAR_RETRY_COUNT
;
1855 if (attr_mask
& IB_QP_MAX_QP_RD_ATOMIC
) {
1856 if (attr
->max_rd_atomic
)
1858 cpu_to_be32(fls(attr
->max_rd_atomic
- 1) << 21);
1859 optpar
|= MLX4_QP_OPTPAR_SRA_MAX
;
1862 if (attr_mask
& IB_QP_SQ_PSN
)
1863 context
->next_send_psn
= cpu_to_be32(attr
->sq_psn
);
1865 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
) {
1866 if (attr
->max_dest_rd_atomic
)
1868 cpu_to_be32(fls(attr
->max_dest_rd_atomic
- 1) << 21);
1869 optpar
|= MLX4_QP_OPTPAR_RRA_MAX
;
1872 if (attr_mask
& (IB_QP_ACCESS_FLAGS
| IB_QP_MAX_DEST_RD_ATOMIC
)) {
1873 context
->params2
|= to_mlx4_access_flags(qp
, attr
, attr_mask
);
1874 optpar
|= MLX4_QP_OPTPAR_RWE
| MLX4_QP_OPTPAR_RRE
| MLX4_QP_OPTPAR_RAE
;
1878 context
->params2
|= cpu_to_be32(MLX4_QP_BIT_RIC
);
1880 if (attr_mask
& IB_QP_MIN_RNR_TIMER
) {
1881 context
->rnr_nextrecvpsn
|= cpu_to_be32(attr
->min_rnr_timer
<< 24);
1882 optpar
|= MLX4_QP_OPTPAR_RNR_TIMEOUT
;
1884 if (attr_mask
& IB_QP_RQ_PSN
)
1885 context
->rnr_nextrecvpsn
|= cpu_to_be32(attr
->rq_psn
);
1887 /* proxy and tunnel qp qkeys will be changed in modify-qp wrappers */
1888 if (attr_mask
& IB_QP_QKEY
) {
1889 if (qp
->mlx4_ib_qp_type
&
1890 (MLX4_IB_QPT_PROXY_SMI_OWNER
| MLX4_IB_QPT_TUN_SMI_OWNER
))
1891 context
->qkey
= cpu_to_be32(IB_QP_SET_QKEY
);
1893 if (mlx4_is_mfunc(dev
->dev
) &&
1894 !(qp
->mlx4_ib_qp_type
& MLX4_IB_QPT_ANY_SRIOV
) &&
1895 (attr
->qkey
& MLX4_RESERVED_QKEY_MASK
) ==
1896 MLX4_RESERVED_QKEY_BASE
) {
1897 pr_err("Cannot use reserved QKEY"
1898 " 0x%x (range 0xffff0000..0xffffffff"
1899 " is reserved)\n", attr
->qkey
);
1903 context
->qkey
= cpu_to_be32(attr
->qkey
);
1905 optpar
|= MLX4_QP_OPTPAR_Q_KEY
;
1909 context
->srqn
= cpu_to_be32(1 << 24 | to_msrq(ibqp
->srq
)->msrq
.srqn
);
1911 if (qp
->rq
.wqe_cnt
&& cur_state
== IB_QPS_RESET
&& new_state
== IB_QPS_INIT
)
1912 context
->db_rec_addr
= cpu_to_be64(qp
->db
.dma
);
1914 if (cur_state
== IB_QPS_INIT
&&
1915 new_state
== IB_QPS_RTR
&&
1916 (ibqp
->qp_type
== IB_QPT_GSI
|| ibqp
->qp_type
== IB_QPT_SMI
||
1917 ibqp
->qp_type
== IB_QPT_UD
||
1918 ibqp
->qp_type
== IB_QPT_RAW_PACKET
)) {
1919 context
->pri_path
.sched_queue
= (qp
->port
- 1) << 6;
1920 if (qp
->mlx4_ib_qp_type
== MLX4_IB_QPT_SMI
||
1921 qp
->mlx4_ib_qp_type
&
1922 (MLX4_IB_QPT_PROXY_SMI_OWNER
| MLX4_IB_QPT_TUN_SMI_OWNER
)) {
1923 context
->pri_path
.sched_queue
|= MLX4_IB_DEFAULT_QP0_SCHED_QUEUE
;
1924 if (qp
->mlx4_ib_qp_type
!= MLX4_IB_QPT_SMI
)
1925 context
->pri_path
.fl
= 0x80;
1927 if (qp
->mlx4_ib_qp_type
& MLX4_IB_QPT_ANY_SRIOV
)
1928 context
->pri_path
.fl
= 0x80;
1929 context
->pri_path
.sched_queue
|= MLX4_IB_DEFAULT_SCHED_QUEUE
;
1931 if (rdma_port_get_link_layer(&dev
->ib_dev
, qp
->port
) ==
1932 IB_LINK_LAYER_ETHERNET
) {
1933 if (qp
->mlx4_ib_qp_type
== MLX4_IB_QPT_TUN_GSI
||
1934 qp
->mlx4_ib_qp_type
== MLX4_IB_QPT_GSI
)
1935 context
->pri_path
.feup
= 1 << 7; /* don't fsm */
1936 /* handle smac_index */
1937     if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_UD ||
1938         qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI ||
1939         qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) {
1940         err = handle_eth_ud_smac_index(dev, qp, context);
1945         if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
1946             dev->qp1_proxy[qp->port - 1] = qp;
1951     if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
1952         context->pri_path.ackto = (context->pri_path.ackto & 0xf8) |
1953             MLX4_IB_LINK_TYPE_ETH;
1954         if (dev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
1955             /* set QP to receive both tunneled & non-tunneled packets */
1956             if (!(context->flags & cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET)))
1957                 context->srqn = cpu_to_be32(7 << 28);
1961     if (ibqp->qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) {
1962         int is_eth = rdma_port_get_link_layer(
1963                 &dev->ib_dev, qp->port) ==
1964                 IB_LINK_LAYER_ETHERNET;
1966             context->pri_path.ackto = MLX4_IB_LINK_TYPE_ETH;
1967             optpar |= MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH;
1972     if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
1973         attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
1978     if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1979         context->rlkey_roce_mode |= (1 << 4);
1982      * Before passing a kernel QP to the HW, make sure that the
1983      * ownership bits of the send queue are set and the SQ
1984      * headroom is stamped so that the hardware doesn't start
1985      * processing stale work requests.
1987     if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
1988         struct mlx4_wqe_ctrl_seg *ctrl;
1991         for (i = 0; i < qp->sq.wqe_cnt; ++i) {
1992             ctrl = get_send_wqe(qp, i);
1993             ctrl->owner_opcode = cpu_to_be32(1 << 31);
1994             if (qp->sq_max_wqes_per_wr == 1)
1995                 ctrl->qpn_vlan.fence_size =
1996                     1 << (qp->sq.wqe_shift - 4);
1998             stamp_send_wqe(qp, i, 1 << qp->sq.wqe_shift);
2002     err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state),
2003                  to_mlx4_state(new_state), context, optpar,
2004                  sqd_event, &qp->mqp);
2008     qp->state = new_state;
2010     if (attr_mask & IB_QP_ACCESS_FLAGS)
2011         qp->atomic_rd_en = attr->qp_access_flags;
2012     if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
2013         qp->resp_depth = attr->max_dest_rd_atomic;
2014     if (attr_mask & IB_QP_PORT) {
2015         qp->port = attr->port_num;
2016         update_mcg_macs(dev, qp);
2018     if (attr_mask & IB_QP_ALT_PATH)
2019         qp->alt_port = attr->alt_port_num;
2021     if (is_sqp(dev, qp))
2022         store_sqp_attrs(to_msqp(qp), attr, attr_mask);
2025      * If we moved QP0 to RTR, bring the IB link up; if we moved
2026      * QP0 to RESET or ERROR, bring the link back down.
2028     if (is_qp0(dev, qp)) {
2029         if (cur_state != IB_QPS_RTR && new_state == IB_QPS_RTR)
2030             if (mlx4_INIT_PORT(dev->dev, qp->port))
2031                 pr_warn("INIT_PORT failed for port %d\n",
2034         if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
2035             (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR))
2036             mlx4_CLOSE_PORT(dev->dev, qp->port);
2040      * If we moved a kernel QP to RESET, clean up all old CQ
2041      * entries and reinitialize the QP.
2043     if (new_state == IB_QPS_RESET) {
2044         if (!ibqp->uobject) {
2045             mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
2046                      ibqp->srq ? to_msrq(ibqp->srq) : NULL);
2047             if (send_cq != recv_cq)
2048                 mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
2054             qp->sq_next_wqe = 0;
2058             if (qp->flags & MLX4_IB_QP_NETIF)
2059                 mlx4_ib_steer_qp_reg(dev, qp, 0);
2061         if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) {
2062             mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
2064             qp->pri.smac_port = 0;
2067             mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
2070         if (qp->pri.vid < 0x1000) {
2071             mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid);
2072             qp->pri.vid = 0xFFFF;
2073             qp->pri.candidate_vid = 0xFFFF;
2074             qp->pri.update_vid = 0;
2077         if (qp->alt.vid < 0x1000) {
2078             mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid);
2079             qp->alt.vid = 0xFFFF;
2080             qp->alt.candidate_vid = 0xFFFF;
2081             qp->alt.update_vid = 0;
2085     if (err && qp->counter_index)
2086         mlx4_ib_free_qp_counter(dev, qp);
2087     if (err && steer_qp)
2088         mlx4_ib_steer_qp_reg(dev, qp, 0);
2090     if (qp->pri.candidate_smac ||
2091         (!qp->pri.candidate_smac && qp->pri.candidate_smac_port)) {
2093             mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac);
2095             if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port))
2096                 mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
2097             qp->pri.smac = qp->pri.candidate_smac;
2098             qp->pri.smac_index = qp->pri.candidate_smac_index;
2099             qp->pri.smac_port = qp->pri.candidate_smac_port;
2101         qp->pri.candidate_smac = 0;
2102         qp->pri.candidate_smac_index = 0;
2103         qp->pri.candidate_smac_port = 0;
2105     if (qp->alt.candidate_smac) {
2107             mlx4_unregister_mac(dev->dev, qp->alt.candidate_smac_port, qp->alt.candidate_smac);
2110             mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
2111             qp->alt.smac = qp->alt.candidate_smac;
2112             qp->alt.smac_index = qp->alt.candidate_smac_index;
2113             qp->alt.smac_port = qp->alt.candidate_smac_port;
2115         qp->alt.candidate_smac = 0;
2116         qp->alt.candidate_smac_index = 0;
2117         qp->alt.candidate_smac_port = 0;
2120     if (qp->pri.update_vid) {
2122             if (qp->pri.candidate_vid < 0x1000)
2123                 mlx4_unregister_vlan(dev->dev, qp->pri.candidate_vlan_port,
2124                              qp->pri.candidate_vid);
2126             if (qp->pri.vid < 0x1000)
2127                 mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port,
2129             qp->pri.vid = qp->pri.candidate_vid;
2130             qp->pri.vlan_port = qp->pri.candidate_vlan_port;
2131             qp->pri.vlan_index = qp->pri.candidate_vlan_index;
2133         qp->pri.candidate_vid = 0xFFFF;
2134         qp->pri.update_vid = 0;
2137     if (qp->alt.update_vid) {
2139             if (qp->alt.candidate_vid < 0x1000)
2140                 mlx4_unregister_vlan(dev->dev, qp->alt.candidate_vlan_port,
2141                              qp->alt.candidate_vid);
2143             if (qp->alt.vid < 0x1000)
2144                 mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port,
2146             qp->alt.vid = qp->alt.candidate_vid;
2147             qp->alt.vlan_port = qp->alt.candidate_vlan_port;
2148             qp->alt.vlan_index = qp->alt.candidate_vlan_index;
2150         qp->alt.candidate_vid = 0xFFFF;
2151         qp->alt.update_vid = 0;
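/*
 * Note on the pri/alt "candidate_*" blocks above (a reading of the surviving
 * lines, since some branches of this error path are not shown): the candidate
 * smac/vid fields hold MAC/VLAN registrations reserved earlier in the
 * transition.  On failure the candidates themselves are unregistered; on
 * success the previously active MAC/VLAN is released and the candidate values
 * are committed to the active fields.  The candidate fields are cleared in
 * either case so a later transition starts from a clean slate.
 */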
2157 static int _mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2158                   int attr_mask, struct ib_udata *udata)
2160     struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
2161     struct mlx4_ib_qp *qp = to_mqp(ibqp);
2162     enum ib_qp_state cur_state, new_state;
2165     mutex_lock(&qp->mutex);
2167     cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
2168     new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
2170     if (cur_state == new_state && cur_state == IB_QPS_RESET) {
2171         ll = IB_LINK_LAYER_UNSPECIFIED;
2173         int port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
2174         ll = rdma_port_get_link_layer(&dev->ib_dev, port);
2177     if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
2179         pr_debug("qpn 0x%x: invalid attribute mask specified "
2180              "for transition %d to %d. qp_type %d,"
2181              " attr_mask 0x%x\n",
2182              ibqp->qp_num, cur_state, new_state,
2183              ibqp->qp_type, attr_mask);
2187     if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT)) {
2188         if ((cur_state == IB_QPS_RESET) && (new_state == IB_QPS_INIT)) {
2189             if ((ibqp->qp_type == IB_QPT_RC) ||
2190                 (ibqp->qp_type == IB_QPT_UD) ||
2191                 (ibqp->qp_type == IB_QPT_UC) ||
2192                 (ibqp->qp_type == IB_QPT_RAW_PACKET) ||
2193                 (ibqp->qp_type == IB_QPT_XRC_INI)) {
2194                 attr->port_num = mlx4_ib_bond_next_port(dev);
2197             /* no sense in changing port_num
2198              * when ports are bonded */
2199             attr_mask &= ~IB_QP_PORT;
2203     if ((attr_mask & IB_QP_PORT) &&
2204         (attr->port_num == 0 || attr->port_num > dev->num_ports)) {
2205         pr_debug("qpn 0x%x: invalid port number (%d) specified "
2206              "for transition %d to %d. qp_type %d\n",
2207              ibqp->qp_num, attr->port_num, cur_state,
2208              new_state, ibqp->qp_type);
2212     if ((attr_mask & IB_QP_PORT) && (ibqp->qp_type == IB_QPT_RAW_PACKET) &&
2213         (rdma_port_get_link_layer(&dev->ib_dev, attr->port_num) !=
2214          IB_LINK_LAYER_ETHERNET))
2217     if (attr_mask & IB_QP_PKEY_INDEX) {
2218         int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
2219         if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p]) {
2220             pr_debug("qpn 0x%x: invalid pkey index (%d) specified "
2221                  "for transition %d to %d. qp_type %d\n",
2222                  ibqp->qp_num, attr->pkey_index, cur_state,
2223                  new_state, ibqp->qp_type);
2228     if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
2229         attr->max_rd_atomic > dev->dev->caps.max_qp_init_rdma) {
2230         pr_debug("qpn 0x%x: max_rd_atomic (%d) too large. "
2231              "Transition %d to %d. qp_type %d\n",
2232              ibqp->qp_num, attr->max_rd_atomic, cur_state,
2233              new_state, ibqp->qp_type);
2237     if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
2238         attr->max_dest_rd_atomic > dev->dev->caps.max_qp_dest_rdma) {
2239         pr_debug("qpn 0x%x: max_dest_rd_atomic (%d) too large. "
2240              "Transition %d to %d. qp_type %d\n",
2241              ibqp->qp_num, attr->max_dest_rd_atomic, cur_state,
2242              new_state, ibqp->qp_type);
2246     if (cur_state == new_state && cur_state == IB_QPS_RESET) {
2251     err = __mlx4_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
2253     if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT))
2257     mutex_unlock(&qp->mutex);
2261 int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2262               int attr_mask, struct ib_udata *udata)
2264     struct mlx4_ib_qp *mqp = to_mqp(ibqp);
2267     ret = _mlx4_ib_modify_qp(ibqp, attr, attr_mask, udata);
2269     if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
2270         struct mlx4_ib_sqp *sqp = to_msqp(mqp);
2273         if (sqp->roce_v2_gsi)
2274             err = ib_modify_qp(sqp->roce_v2_gsi, attr, attr_mask);
2276             pr_err("Failed to modify GSI QP for RoCEv2 (%d)\n",
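/*
 * When a GSI QP has a RoCEv2 shadow QP (sqp->roce_v2_gsi), the same attribute
 * change is replayed on the shadow QP so both stay in the same state.  A
 * failure on the shadow QP is only logged; the result of the primary modify
 * (ret) is what the caller sees.
 */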
2282 static int vf_get_qp0_qkey(struct mlx4_dev *dev, int qpn, u32 *qkey)
2285     for (i = 0; i < dev->caps.num_ports; i++) {
2286         if (qpn == dev->caps.qp0_proxy[i] ||
2287             qpn == dev->caps.qp0_tunnel[i]) {
2288             *qkey = dev->caps.qp0_qkey[i];
2295 static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
2296                   struct ib_ud_wr *wr,
2297                   void *wqe, unsigned *mlx_seg_len)
2299     struct mlx4_ib_dev *mdev = to_mdev(sqp->qp.ibqp.device);
2300     struct ib_device *ib_dev = &mdev->ib_dev;
2301     struct mlx4_wqe_mlx_seg *mlx = wqe;
2302     struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
2303     struct mlx4_ib_ah *ah = to_mah(wr->ah);
2311     if (wr->wr.opcode != IB_WR_SEND)
2316     for (i = 0; i < wr->wr.num_sge; ++i)
2317         send_size += wr->wr.sg_list[i].length;
2319     /* for proxy-qp0 sends, need to add in size of tunnel header */
2320     /* for tunnel-qp0 sends, tunnel header is already in s/g list */
2321     if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER)
2322         send_size += sizeof (struct mlx4_ib_tunnel_header);
2324     ib_ud_header_init(send_size, 1, 0, 0, 0, 0, 0, 0, &sqp->ud_header);
2326     if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) {
2327         sqp->ud_header.lrh.service_level =
2328             be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
2329         sqp->ud_header.lrh.destination_lid =
2330             cpu_to_be16(ah->av.ib.g_slid & 0x7f);
2331         sqp->ud_header.lrh.source_lid =
2332             cpu_to_be16(ah->av.ib.g_slid & 0x7f);
2335     mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
2337     /* force loopback */
2338     mlx->flags |= cpu_to_be32(MLX4_WQE_MLX_VL15 | 0x1 | MLX4_WQE_MLX_SLR);
2339     mlx->rlid = sqp->ud_header.lrh.destination_lid;
2341     sqp->ud_header.lrh.virtual_lane = 0;
2342     sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
2343     ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
2344     sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
2345     if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER)
2346         sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
2348         sqp->ud_header.bth.destination_qpn =
2349             cpu_to_be32(mdev->dev->caps.qp0_tunnel[sqp->qp.port - 1]);
2351     sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
2352     if (mlx4_is_master(mdev->dev)) {
2353         if (mlx4_get_parav_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey))
2356         if (vf_get_qp0_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey))
2359     sqp->ud_header.deth.qkey = cpu_to_be32(qkey);
2360     sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.mqp.qpn);
2362     sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
2363     sqp->ud_header.immediate_present = 0;
2365     header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);
2368      * Inline data segments may not cross a 64 byte boundary. If
2369      * our UD header is bigger than the space available up to the
2370      * next 64 byte boundary in the WQE, use two inline data
2371      * segments to hold the UD header.
2373     spc = MLX4_INLINE_ALIGN -
2374         ((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
2375     if (header_size <= spc) {
2376         inl->byte_count = cpu_to_be32(1 << 31 | header_size);
2377         memcpy(inl + 1, sqp->header_buf, header_size);
2380         inl->byte_count = cpu_to_be32(1 << 31 | spc);
2381         memcpy(inl + 1, sqp->header_buf, spc);
2383         inl = (void *) (inl + 1) + spc;
2384         memcpy(inl + 1, sqp->header_buf + spc, header_size - spc);
2386          * Need a barrier here to make sure all the data is
2387          * visible before the byte_count field is set.
2388          * Otherwise the HCA prefetcher could grab the 64-byte
2389          * chunk with this inline segment and get a valid (!=
2390          * 0xffffffff) byte count but stale data, and end up
2391          * generating a packet with bad headers.
2393          * The first inline segment's byte_count field doesn't
2394          * need a barrier, because it comes after a
2395          * control/MLX segment and therefore is at an offset
2399         inl->byte_count = cpu_to_be32(1 << 31 | (header_size - spc));
2404         ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
2408 static u8 sl_to_vl(struct mlx4_ib_dev *dev, u8 sl, int port_num)
2410     union sl2vl_tbl_to_u64 tmp_vltab;
2415     tmp_vltab.sl64 = atomic64_read(&dev->sl2vl[port_num - 1]);
2416     vl = tmp_vltab.sl8[sl >> 1];
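/*
 * The per-port sl2vl table is read as a single 64-bit value: eight bytes,
 * each packing two 4-bit VL entries, so sl >> 1 selects the byte that holds
 * the VLs for a pair of adjacent SLs; the low bit of sl presumably selects
 * which nibble of that byte is returned.
 */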
2424 static int fill_gid_by_hw_index(struct mlx4_ib_dev *ibdev, u8 port_num,
2425                 int index, union ib_gid *gid,
2426                 enum ib_gid_type *gid_type)
2428     struct mlx4_ib_iboe *iboe = &ibdev->iboe;
2429     struct mlx4_port_gid_table *port_gid_table;
2430     unsigned long flags;
2432     port_gid_table = &iboe->gids[port_num - 1];
2433     spin_lock_irqsave(&iboe->lock, flags);
2434     memcpy(gid, &port_gid_table->gids[index].gid, sizeof(*gid));
2435     *gid_type = port_gid_table->gids[index].gid_type;
2436     spin_unlock_irqrestore(&iboe->lock, flags);
2437     if (!memcmp(gid, &zgid, sizeof(*gid)))
2443 #define MLX4_ROCEV2_QP1_SPORT 0xC000
2444 static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr,
2445                 void *wqe, unsigned *mlx_seg_len)
2447     struct ib_device *ib_dev = sqp->qp.ibqp.device;
2448     struct mlx4_ib_dev *ibdev = to_mdev(ib_dev);
2449     struct mlx4_wqe_mlx_seg *mlx = wqe;
2450     struct mlx4_wqe_ctrl_seg *ctrl = wqe;
2451     struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
2452     struct mlx4_ib_ah *ah = to_mah(wr->ah);
2462     bool is_vlan = false;
2464     bool is_udp = false;
2468     for (i = 0; i < wr->wr.num_sge; ++i)
2469         send_size += wr->wr.sg_list[i].length;
2471     is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET;
2472     is_grh = mlx4_ib_ah_grh_present(ah);
2474         enum ib_gid_type gid_type;
2475         if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
2476             /* When multi-function is enabled, the ib_core gid
2477              * indexes don't necessarily match the hw ones, so
2478              * we must use our own cache */
2479             err = mlx4_get_roce_gid_from_slave(to_mdev(ib_dev)->dev,
2480                                be32_to_cpu(ah->av.ib.port_pd) >> 24,
2481                                ah->av.ib.gid_index, &sgid.raw[0]);
2485             err = fill_gid_by_hw_index(ibdev, sqp->qp.port,
2486                            ah->av.ib.gid_index,
2489                 is_udp = gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
2491                 if (ipv6_addr_v4mapped((struct in6_addr *)&sgid))
2501         if (ah->av.eth.vlan != cpu_to_be16(0xffff)) {
2502             vlan = be16_to_cpu(ah->av.eth.vlan) & 0x0fff;
2506     err = ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh,
2507                 ip_version, is_udp, 0, &sqp->ud_header);
2512         sqp->ud_header.lrh.service_level =
2513             be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
2514         sqp->ud_header.lrh.destination_lid = ah->av.ib.dlid;
2515         sqp->ud_header.lrh.source_lid = cpu_to_be16(ah->av.ib.g_slid & 0x7f);
2518     if (is_grh || (ip_version == 6)) {
2519         sqp->ud_header.grh.traffic_class =
2520             (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20) & 0xff;
2521         sqp->ud_header.grh.flow_label =
2522             ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
2523         sqp->ud_header.grh.hop_limit = ah->av.ib.hop_limit;
2525             memcpy(sqp->ud_header.grh.source_gid.raw, sgid.raw, 16);
2527             if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
2528                 /* When multi-function is enabled, the ib_core gid
2529                  * indexes don't necessarily match the hw ones, so
2530                  * we must use our own cache
2532                 sqp->ud_header.grh.source_gid.global.subnet_prefix =
2533                     cpu_to_be64(atomic64_read(&(to_mdev(ib_dev)->sriov.
2534                                     demux[sqp->qp.port - 1].
2536                 sqp->ud_header.grh.source_gid.global.interface_id =
2537                     to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
2538                     guid_cache[ah->av.ib.gid_index];
2540                 ib_get_cached_gid(ib_dev,
2541                           be32_to_cpu(ah->av.ib.port_pd) >> 24,
2542                           ah->av.ib.gid_index,
2543                           &sqp->ud_header.grh.source_gid, NULL);
2546         memcpy(sqp->ud_header.grh.destination_gid.raw,
2547                ah->av.ib.dgid, 16);
2550     if (ip_version == 4) {
2551         sqp->ud_header.ip4.tos =
2552             (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20) & 0xff;
2553         sqp->ud_header.ip4.id = 0;
2554         sqp->ud_header.ip4.frag_off = htons(IP_DF);
2555         sqp->ud_header.ip4.ttl = ah->av.eth.hop_limit;
2557         memcpy(&sqp->ud_header.ip4.saddr,
2559         memcpy(&sqp->ud_header.ip4.daddr, ah->av.ib.dgid + 12, 4);
2560         sqp->ud_header.ip4.check = ib_ud_ip4_csum(&sqp->ud_header);
2564         sqp->ud_header.udp.dport = htons(ROCE_V2_UDP_DPORT);
2565         sqp->ud_header.udp.sport = htons(MLX4_ROCEV2_QP1_SPORT);
2566         sqp->ud_header.udp.csum = 0;
2569     mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
2572         mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) |
2573                       (sqp->ud_header.lrh.destination_lid ==
2574                        IB_LID_PERMISSIVE ? MLX4_WQE_MLX_SLR : 0) |
2575                       (sqp->ud_header.lrh.service_level << 8));
2576         if (ah->av.ib.port_pd & cpu_to_be32(0x80000000))
2577             mlx->flags |= cpu_to_be32(0x1); /* force loopback */
2578         mlx->rlid = sqp->ud_header.lrh.destination_lid;
2581     switch (wr->wr.opcode) {
2583         sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
2584         sqp->ud_header.immediate_present = 0;
2586     case IB_WR_SEND_WITH_IMM:
2587         sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
2588         sqp->ud_header.immediate_present = 1;
2589         sqp->ud_header.immediate_data = wr->wr.ex.imm_data;
2596         struct in6_addr in6;
2598         u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
2600         ether_type = (!is_udp) ? ETH_P_IBOE :
2601             (ip_version == 4 ? ETH_P_IP : ETH_P_IPV6);
2603         mlx->sched_prio = cpu_to_be16(pcp);
2605         ether_addr_copy(sqp->ud_header.eth.smac_h, ah->av.eth.s_mac);
2606         memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6);
2607         memcpy(&ctrl->srcrb_flags16[0], ah->av.eth.mac, 2);
2608         memcpy(&ctrl->imm, ah->av.eth.mac + 2, 4);
2609         memcpy(&in6, sgid.raw, sizeof(in6));
2612         if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
2613             mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
2615             sqp->ud_header.eth.type = cpu_to_be16(ether_type);
2617             sqp->ud_header.vlan.type = cpu_to_be16(ether_type);
2618             sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp);
2621         sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 :
2622             sl_to_vl(to_mdev(ib_dev),
2623                  sqp->ud_header.lrh.service_level,
2625         if (sqp->qp.ibqp.qp_num && sqp->ud_header.lrh.virtual_lane == 15)
2627         if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
2628             sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
2630     sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
2631     if (!sqp->qp.ibqp.qp_num)
2632         ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey);
2634         ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index, &pkey);
2635     sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
2636     sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
2637     sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
2638     sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ?
2639                            sqp->qkey : wr->remote_qkey);
2640     sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);
2642     header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);
2645         pr_err("built UD header of size %d:\n", header_size);
2646         for (i = 0; i < header_size / 4; ++i) {
2648                 pr_err(" [%02x] ", i * 4);
2650                 be32_to_cpu(((__be32 *) sqp->header_buf)[i]));
2651             if ((i + 1) % 8 == 0)
2658      * Inline data segments may not cross a 64 byte boundary. If
2659      * our UD header is bigger than the space available up to the
2660      * next 64 byte boundary in the WQE, use two inline data
2661      * segments to hold the UD header.
2663     spc = MLX4_INLINE_ALIGN -
2664         ((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
2665     if (header_size <= spc) {
2666         inl->byte_count = cpu_to_be32(1 << 31 | header_size);
2667         memcpy(inl + 1, sqp->header_buf, header_size);
2670         inl->byte_count = cpu_to_be32(1 << 31 | spc);
2671         memcpy(inl + 1, sqp->header_buf, spc);
2673         inl = (void *) (inl + 1) + spc;
2674         memcpy(inl + 1, sqp->header_buf + spc, header_size - spc);
2676          * Need a barrier here to make sure all the data is
2677          * visible before the byte_count field is set.
2678          * Otherwise the HCA prefetcher could grab the 64-byte
2679          * chunk with this inline segment and get a valid (!=
2680          * 0xffffffff) byte count but stale data, and end up
2681          * generating a packet with bad headers.
2683          * The first inline segment's byte_count field doesn't
2684          * need a barrier, because it comes after a
2685          * control/MLX segment and therefore is at an offset
2689         inl->byte_count = cpu_to_be32(1 << 31 | (header_size - spc));
2694         ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
2698 static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
2701     struct mlx4_ib_cq *cq;
2703     cur = wq->head - wq->tail;
2704     if (likely(cur + nreq < wq->max_post))
2708     spin_lock(&cq->lock);
2709     cur = wq->head - wq->tail;
2710     spin_unlock(&cq->lock);
2712     return cur + nreq >= wq->max_post;
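/*
 * Fast path: the unlocked head - tail computation is normally enough.  When
 * the queue looks full, cur is recomputed under the CQ lock, presumably so a
 * tail advance made by concurrent completion processing is observed before
 * overflow is declared.
 */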
2715 static __be32 convert_access(int acc)
2717     return (acc & IB_ACCESS_REMOTE_ATOMIC ?
2718         cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC) : 0) |
2719         (acc & IB_ACCESS_REMOTE_WRITE ?
2720         cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE) : 0) |
2721         (acc & IB_ACCESS_REMOTE_READ ?
2722         cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ) : 0) |
2723         (acc & IB_ACCESS_LOCAL_WRITE ? cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_WRITE) : 0) |
2724         cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ);
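/*
 * Example: an ib_reg_wr with access = IB_ACCESS_LOCAL_WRITE |
 * IB_ACCESS_REMOTE_READ yields the local-write and remote-read permission
 * bits of the FMR segment; local read permission is always set regardless of
 * the requested flags.
 */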
2727 static void set_reg_seg(struct mlx4_wqe_fmr_seg *fseg,
2728             struct ib_reg_wr *wr)
2730     struct mlx4_ib_mr *mr = to_mmr(wr->mr);
2732     fseg->flags = convert_access(wr->access);
2733     fseg->mem_key = cpu_to_be32(wr->key);
2734     fseg->buf_list = cpu_to_be64(mr->page_map);
2735     fseg->start_addr = cpu_to_be64(mr->ibmr.iova);
2736     fseg->reg_len = cpu_to_be64(mr->ibmr.length);
2737     fseg->offset = 0; /* XXX -- is this just for ZBVA? */
2738     fseg->page_size = cpu_to_be32(ilog2(mr->ibmr.page_size));
2739     fseg->reserved[0] = 0;
2740     fseg->reserved[1] = 0;
2743 static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey)
2745     memset(iseg, 0, sizeof(*iseg));
2746     iseg->mem_key = cpu_to_be32(rkey);
2749 static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg,
2750                       u64 remote_addr, u32 rkey)
2752     rseg->raddr = cpu_to_be64(remote_addr);
2753     rseg->rkey = cpu_to_be32(rkey);
2757 static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg,
2758                struct ib_atomic_wr *wr)
2760     if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
2761         aseg->swap_add = cpu_to_be64(wr->swap);
2762         aseg->compare = cpu_to_be64(wr->compare_add);
2763     } else if (wr->wr.opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
2764         aseg->swap_add = cpu_to_be64(wr->compare_add);
2765         aseg->compare = cpu_to_be64(wr->compare_add_mask);
2767         aseg->swap_add = cpu_to_be64(wr->compare_add);
2773 static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
2774                   struct ib_atomic_wr *wr)
2776     aseg->swap_add = cpu_to_be64(wr->swap);
2777     aseg->swap_add_mask = cpu_to_be64(wr->swap_mask);
2778     aseg->compare = cpu_to_be64(wr->compare_add);
2779     aseg->compare_mask = cpu_to_be64(wr->compare_add_mask);
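/*
 * Note the asymmetry above: a masked fetch-and-add fits in the two 64-bit
 * words of the plain atomic segment (add value in swap_add, mask in compare),
 * while a masked compare-and-swap needs the wider masked-atomic segment that
 * carries swap, swap mask, compare and compare mask explicitly.
 */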
2782 static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
2783                  struct ib_ud_wr *wr)
2785     memcpy(dseg->av, &to_mah(wr->ah)->av, sizeof (struct mlx4_av));
2786     dseg->dqpn = cpu_to_be32(wr->remote_qpn);
2787     dseg->qkey = cpu_to_be32(wr->remote_qkey);
2788     dseg->vlan = to_mah(wr->ah)->av.eth.vlan;
2789     memcpy(dseg->mac, to_mah(wr->ah)->av.eth.mac, 6);
2792 static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev,
2793                     struct mlx4_wqe_datagram_seg *dseg,
2794                     struct ib_ud_wr *wr,
2795                     enum mlx4_ib_qp_type qpt)
2797     union mlx4_ext_av *av = &to_mah(wr->ah)->av;
2798     struct mlx4_av sqp_av = {0};
2799     int port = *((u8 *) &av->ib.port_pd) & 0x3;
2801     /* force loopback */
2802     sqp_av.port_pd = av->ib.port_pd | cpu_to_be32(0x80000000);
2803     sqp_av.g_slid = av->ib.g_slid & 0x7f; /* no GRH */
2804     sqp_av.sl_tclass_flowlabel = av->ib.sl_tclass_flowlabel &
2805             cpu_to_be32(0xf0000000);
2807     memcpy(dseg->av, &sqp_av, sizeof (struct mlx4_av));
2808     if (qpt == MLX4_IB_QPT_PROXY_GSI)
2809         dseg->dqpn = cpu_to_be32(dev->dev->caps.qp1_tunnel[port - 1]);
2811         dseg->dqpn = cpu_to_be32(dev->dev->caps.qp0_tunnel[port - 1]);
2812     /* Use QKEY from the QP context, which is set by master */
2813     dseg->qkey = cpu_to_be32(IB_QP_SET_QKEY);
2816 static void build_tunnel_header(struct ib_ud_wr *wr, void *wqe, unsigned *mlx_seg_len)
2818     struct mlx4_wqe_inline_seg *inl = wqe;
2819     struct mlx4_ib_tunnel_header hdr;
2820     struct mlx4_ib_ah *ah = to_mah(wr->ah);
2824     memcpy(&hdr.av, &ah->av, sizeof hdr.av);
2825     hdr.remote_qpn = cpu_to_be32(wr->remote_qpn);
2826     hdr.pkey_index = cpu_to_be16(wr->pkey_index);
2827     hdr.qkey = cpu_to_be32(wr->remote_qkey);
2828     memcpy(hdr.mac, ah->av.eth.mac, 6);
2829     hdr.vlan = ah->av.eth.vlan;
2831     spc = MLX4_INLINE_ALIGN -
2832         ((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
2833     if (sizeof (hdr) <= spc) {
2834         memcpy(inl + 1, &hdr, sizeof (hdr));
2836         inl->byte_count = cpu_to_be32(1 << 31 | sizeof (hdr));
2839         memcpy(inl + 1, &hdr, spc);
2841         inl->byte_count = cpu_to_be32(1 << 31 | spc);
2843         inl = (void *) (inl + 1) + spc;
2844         memcpy(inl + 1, (void *) &hdr + spc, sizeof (hdr) - spc);
2846         inl->byte_count = cpu_to_be32(1 << 31 | (sizeof (hdr) - spc));
2851         ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + sizeof (hdr), 16);
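/*
 * Worked example of the split logic used here and in the header builders
 * above, assuming MLX4_INLINE_ALIGN is 64: if the first inline payload byte
 * starts 40 bytes before the next 64-byte boundary (spc = 40) and the data to
 * inline is 48 bytes, it is written as a 40-byte inline segment followed by
 * an 8-byte one, so neither inline segment straddles the boundary.
 */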
2854 static void set_mlx_icrc_seg(void *dseg)
2857     struct mlx4_wqe_inline_seg *iseg = dseg;
2862      * Need a barrier here before writing the byte_count field to
2863      * make sure that all the data is visible before the
2864      * byte_count field is set. Otherwise, if the segment begins
2865      * a new cacheline, the HCA prefetcher could grab the 64-byte
2866      * chunk and get a valid (!= 0xffffffff) byte count but
2867      * stale data, and end up sending the wrong data.
2871     iseg->byte_count = cpu_to_be32((1 << 31) | 4);
2874 static void set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
2876     dseg->lkey = cpu_to_be32(sg->lkey);
2877     dseg->addr = cpu_to_be64(sg->addr);
2880      * Need a barrier here before writing the byte_count field to
2881      * make sure that all the data is visible before the
2882      * byte_count field is set. Otherwise, if the segment begins
2883      * a new cacheline, the HCA prefetcher could grab the 64-byte
2884      * chunk and get a valid (!= 0xffffffff) byte count but
2885      * stale data, and end up sending the wrong data.
2889     dseg->byte_count = cpu_to_be32(sg->length);
2892 static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
2894     dseg->byte_count = cpu_to_be32(sg->length);
2895     dseg->lkey = cpu_to_be32(sg->lkey);
2896     dseg->addr = cpu_to_be64(sg->addr);
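/*
 * set_data_seg() (send path) writes byte_count last, behind the barrier
 * described in the comment above it, because the HCA may prefetch a send WQE
 * as soon as the byte count stops looking like the stamp value.
 * __set_data_seg() (receive path) has no such ordering requirement,
 * presumably because receive WQEs are only handed to the hardware via the
 * doorbell record update in mlx4_ib_post_recv().
 */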
2899 static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_ud_wr *wr,
2900              struct mlx4_ib_qp *qp, unsigned *lso_seg_len,
2901              __be32 *lso_hdr_sz, __be32 *blh)
2903     unsigned halign = ALIGN(sizeof *wqe + wr->hlen, 16);
2905     if (unlikely(halign > MLX4_IB_CACHE_LINE_SIZE))
2906         *blh = cpu_to_be32(1 << 6);
2908     if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) &&
2909              wr->wr.num_sge > qp->sq.max_gs - (halign >> 4)))
2912     memcpy(wqe->header, wr->header, wr->hlen);
2914     *lso_hdr_sz = cpu_to_be32(wr->mss << 16 | wr->hlen);
2915     *lso_seg_len = halign;
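/*
 * halign is the LSO segment size (descriptor plus the wr->hlen header bytes)
 * rounded up to a 16-byte WQE chunk.  When that exceeds
 * MLX4_IB_CACHE_LINE_SIZE (64), the blh bit is set in the WQE,
 * presumably so the hardware knows the LSO header spills past the first
 * cache line of the work request.
 */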
2919 static __be32 send_ieth(struct ib_send_wr *wr)
2921     switch (wr->opcode) {
2922     case IB_WR_SEND_WITH_IMM:
2923     case IB_WR_RDMA_WRITE_WITH_IMM:
2924         return wr->ex.imm_data;
2926     case IB_WR_SEND_WITH_INV:
2927         return cpu_to_be32(wr->ex.invalidate_rkey);
2934 static void add_zero_len_inline(void *wqe)
2936     struct mlx4_wqe_inline_seg *inl = wqe;
2938     inl->byte_count = cpu_to_be32(1 << 31);
2941 int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2942               struct ib_send_wr **bad_wr)
2944     struct mlx4_ib_qp *qp = to_mqp(ibqp);
2946     struct mlx4_wqe_ctrl_seg *ctrl;
2947     struct mlx4_wqe_data_seg *dseg;
2948     unsigned long flags;
2952     int uninitialized_var(stamp);
2953     int uninitialized_var(size);
2954     unsigned uninitialized_var(seglen);
2957     __be32 uninitialized_var(lso_hdr_sz);
2960     struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
2962     if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
2963         struct mlx4_ib_sqp *sqp = to_msqp(qp);
2965         if (sqp->roce_v2_gsi) {
2966             struct mlx4_ib_ah *ah = to_mah(ud_wr(wr)->ah);
2967             enum ib_gid_type gid_type;
2970             if (!fill_gid_by_hw_index(mdev, sqp->qp.port,
2971                           ah->av.ib.gid_index,
2973                 qp = (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ?
2974                     to_mqp(sqp->roce_v2_gsi) : qp;
2976                 pr_err("Failed to get gid at index %d. RoCEv2 will not work properly\n",
2977                        ah->av.ib.gid_index);
2981     spin_lock_irqsave(&qp->sq.lock, flags);
2982     if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
2989     ind = qp->sq_next_wqe;
2991     for (nreq = 0; wr; ++nreq, wr = wr->next) {
2995         if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
3001         if (unlikely(wr->num_sge > qp->sq.max_gs)) {
3007         ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
3008         qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id;
3011             (wr->send_flags & IB_SEND_SIGNALED ?
3012              cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) : 0) |
3013             (wr->send_flags & IB_SEND_SOLICITED ?
3014              cpu_to_be32(MLX4_WQE_CTRL_SOLICITED) : 0) |
3015             ((wr->send_flags & IB_SEND_IP_CSUM) ?
3016              cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
3017                      MLX4_WQE_CTRL_TCP_UDP_CSUM) : 0) |
3020         ctrl->imm = send_ieth(wr);
3022         wqe += sizeof *ctrl;
3023         size = sizeof *ctrl / 16;
3025         switch (qp->mlx4_ib_qp_type) {
3026         case MLX4_IB_QPT_RC:
3027         case MLX4_IB_QPT_UC:
3028             switch (wr->opcode) {
3029             case IB_WR_ATOMIC_CMP_AND_SWP:
3030             case IB_WR_ATOMIC_FETCH_AND_ADD:
3031             case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
3032                 set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
3033                           atomic_wr(wr)->rkey);
3034                 wqe += sizeof (struct mlx4_wqe_raddr_seg);
3036                 set_atomic_seg(wqe, atomic_wr(wr));
3037                 wqe += sizeof (struct mlx4_wqe_atomic_seg);
3039                 size += (sizeof (struct mlx4_wqe_raddr_seg) +
3040                      sizeof (struct mlx4_wqe_atomic_seg)) / 16;
3044             case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
3045                 set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
3046                           atomic_wr(wr)->rkey);
3047                 wqe += sizeof (struct mlx4_wqe_raddr_seg);
3049                 set_masked_atomic_seg(wqe, atomic_wr(wr));
3050                 wqe += sizeof (struct mlx4_wqe_masked_atomic_seg);
3052                 size += (sizeof (struct mlx4_wqe_raddr_seg) +
3053                      sizeof (struct mlx4_wqe_masked_atomic_seg)) / 16;
3057             case IB_WR_RDMA_READ:
3058             case IB_WR_RDMA_WRITE:
3059             case IB_WR_RDMA_WRITE_WITH_IMM:
3060                 set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
3062                 wqe += sizeof (struct mlx4_wqe_raddr_seg);
3063                 size += sizeof (struct mlx4_wqe_raddr_seg) / 16;
3066             case IB_WR_LOCAL_INV:
3067                 ctrl->srcrb_flags |=
3068                     cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
3069                 set_local_inv_seg(wqe, wr->ex.invalidate_rkey);
3070                 wqe += sizeof (struct mlx4_wqe_local_inval_seg);
3071                 size += sizeof (struct mlx4_wqe_local_inval_seg) / 16;
3075                 ctrl->srcrb_flags |=
3076                     cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
3077                 set_reg_seg(wqe, reg_wr(wr));
3078                 wqe += sizeof(struct mlx4_wqe_fmr_seg);
3079                 size += sizeof(struct mlx4_wqe_fmr_seg) / 16;
3083                 /* No extra segments required for sends */
3088         case MLX4_IB_QPT_TUN_SMI_OWNER:
3089             err = build_sriov_qp0_header(to_msqp(qp), ud_wr(wr),
3091             if (unlikely(err)) {
3096             size += seglen / 16;
3098         case MLX4_IB_QPT_TUN_SMI:
3099         case MLX4_IB_QPT_TUN_GSI:
3100             /* this is a UD qp used in MAD responses to slaves. */
3101             set_datagram_seg(wqe, ud_wr(wr));
3102             /* set the forced-loopback bit in the data seg av */
3103             *(__be32 *) wqe |= cpu_to_be32(0x80000000);
3104             wqe += sizeof (struct mlx4_wqe_datagram_seg);
3105             size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
3107         case MLX4_IB_QPT_UD:
3108             set_datagram_seg(wqe, ud_wr(wr));
3109             wqe += sizeof (struct mlx4_wqe_datagram_seg);
3110             size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
3112             if (wr->opcode == IB_WR_LSO) {
3113                 err = build_lso_seg(wqe, ud_wr(wr), qp, &seglen,
3115                 if (unlikely(err)) {
3119                 lso_wqe = (__be32 *) wqe;
3121                 size += seglen / 16;
3125         case MLX4_IB_QPT_PROXY_SMI_OWNER:
3126             err = build_sriov_qp0_header(to_msqp(qp), ud_wr(wr),
3128             if (unlikely(err)) {
3133             size += seglen / 16;
3134             /* to start tunnel header on a cache-line boundary */
3135             add_zero_len_inline(wqe);
3138             build_tunnel_header(ud_wr(wr), wqe, &seglen);
3140             size += seglen / 16;
3142         case MLX4_IB_QPT_PROXY_SMI:
3143         case MLX4_IB_QPT_PROXY_GSI:
3144             /* If we are tunneling special qps, this is a UD qp.
3145              * In this case we first add a UD segment targeting
3146              * the tunnel qp, and then add a header with address
3148             set_tunnel_datagram_seg(to_mdev(ibqp->device), wqe,
3150                         qp->mlx4_ib_qp_type);
3151             wqe += sizeof (struct mlx4_wqe_datagram_seg);
3152             size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
3153             build_tunnel_header(ud_wr(wr), wqe, &seglen);
3155             size += seglen / 16;
3158         case MLX4_IB_QPT_SMI:
3159         case MLX4_IB_QPT_GSI:
3160             err = build_mlx_header(to_msqp(qp), ud_wr(wr), ctrl,
3162             if (unlikely(err)) {
3167             size += seglen / 16;
3175          * Write data segments in reverse order, so as to
3176          * overwrite cacheline stamp last within each
3177          * cacheline. This avoids issues with WQE
3182         dseg += wr->num_sge - 1;
3183         size += wr->num_sge * (sizeof (struct mlx4_wqe_data_seg) / 16);
3185         /* Add one more inline data segment for ICRC for MLX sends */
3186         if (unlikely(qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI ||
3187                  qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI ||
3188                  qp->mlx4_ib_qp_type &
3189                  (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER))) {
3190             set_mlx_icrc_seg(dseg + 1);
3191             size += sizeof (struct mlx4_wqe_data_seg) / 16;
3194         for (i = wr->num_sge - 1; i >= 0; --i, --dseg)
3195             set_data_seg(dseg, wr->sg_list + i);
3198          * Possibly overwrite stamping in cacheline with LSO
3199          * segment only after making sure all data segments
3203         *lso_wqe = lso_hdr_sz;
3205         ctrl->qpn_vlan.fence_size = (wr->send_flags & IB_SEND_FENCE ?
3206                          MLX4_WQE_CTRL_FENCE : 0) | size;
3209          * Make sure descriptor is fully written before
3210          * setting ownership bit (because HW can start
3211          * executing as soon as we do).
3215         if (wr->opcode < 0 || wr->opcode >= ARRAY_SIZE(mlx4_ib_opcode)) {
3221         ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] |
3222             (ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh;
3224         stamp = ind + qp->sq_spare_wqes;
3225         ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift);
3228          * We can improve latency by not stamping the last
3229          * send queue WQE until after ringing the doorbell, so
3230          * only stamp here if there are still more WQEs to post.
3232          * Same optimization applies to padding with NOP wqe
3233          * in case of WQE shrinking (used to prevent wrap-around
3234          * in the middle of WR).
3237             stamp_send_wqe(qp, stamp, size * 16);
3238             ind = pad_wraparound(qp, ind);
3244         qp->sq.head += nreq;
3247          * Make sure that descriptors are written before
3252         writel(qp->doorbell_qpn,
3253                to_mdev(ibqp->device)->uar_map + MLX4_SEND_DOORBELL);
3256          * Make sure doorbells don't leak out of SQ spinlock
3257          * and reach the HCA out of order.
3261         stamp_send_wqe(qp, stamp, size * 16);
3263         ind = pad_wraparound(qp, ind);
3264         qp->sq_next_wqe = ind;
3267     spin_unlock_irqrestore(&qp->sq.lock, flags);
3272 int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
3273               struct ib_recv_wr **bad_wr)
3275     struct mlx4_ib_qp *qp = to_mqp(ibqp);
3276     struct mlx4_wqe_data_seg *scat;
3277     unsigned long flags;
3283     struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
3285     max_gs = qp->rq.max_gs;
3286     spin_lock_irqsave(&qp->rq.lock, flags);
3288     if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
3295     ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
3297     for (nreq = 0; wr; ++nreq, wr = wr->next) {
3298         if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
3304         if (unlikely(wr->num_sge > qp->rq.max_gs)) {
3310         scat = get_recv_wqe(qp, ind);
3312         if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
3313             MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
3314             ib_dma_sync_single_for_device(ibqp->device,
3315                               qp->sqp_proxy_rcv[ind].map,
3316                               sizeof (struct mlx4_ib_proxy_sqp_hdr),
3319                 cpu_to_be32(sizeof (struct mlx4_ib_proxy_sqp_hdr));
3320             /* use dma lkey from upper layer entry */
3321             scat->lkey = cpu_to_be32(wr->sg_list->lkey);
3322             scat->addr = cpu_to_be64(qp->sqp_proxy_rcv[ind].map);
3327         for (i = 0; i < wr->num_sge; ++i)
3328             __set_data_seg(scat + i, wr->sg_list + i);
3331             scat[i].byte_count = 0;
3332             scat[i].lkey = cpu_to_be32(MLX4_INVALID_LKEY);
3336         qp->rq.wrid[ind] = wr->wr_id;
3338         ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
3343         qp->rq.head += nreq;
3346          * Make sure that descriptors are written before
3351         *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
3354     spin_unlock_irqrestore(&qp->rq.lock, flags);
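/*
 * For proxy SQP receive queues the first scatter entry of each WQE is
 * consumed internally: it is pointed at the pre-mapped sqp_proxy_rcv buffer
 * (using the lkey of the caller's first SGE) so the tunnel header lands
 * there, and the caller's sg_list is then packed into the remaining entries.
 */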
3359 static inline enum ib_qp_state to_ib_qp_state(enum mlx4_qp_state mlx4_state)
3361     switch (mlx4_state) {
3362     case MLX4_QP_STATE_RST:      return IB_QPS_RESET;
3363     case MLX4_QP_STATE_INIT:     return IB_QPS_INIT;
3364     case MLX4_QP_STATE_RTR:      return IB_QPS_RTR;
3365     case MLX4_QP_STATE_RTS:      return IB_QPS_RTS;
3366     case MLX4_QP_STATE_SQ_DRAINING:
3367     case MLX4_QP_STATE_SQD:      return IB_QPS_SQD;
3368     case MLX4_QP_STATE_SQER:     return IB_QPS_SQE;
3369     case MLX4_QP_STATE_ERR:      return IB_QPS_ERR;
3374 static inline enum ib_mig_state to_ib_mig_state(int mlx4_mig_state)
3376     switch (mlx4_mig_state) {
3377     case MLX4_QP_PM_ARMED:       return IB_MIG_ARMED;
3378     case MLX4_QP_PM_REARM:       return IB_MIG_REARM;
3379     case MLX4_QP_PM_MIGRATED:    return IB_MIG_MIGRATED;
3384 static int to_ib_qp_access_flags(int mlx4_flags)
3388     if (mlx4_flags & MLX4_QP_BIT_RRE)
3389         ib_flags |= IB_ACCESS_REMOTE_READ;
3390     if (mlx4_flags & MLX4_QP_BIT_RWE)
3391         ib_flags |= IB_ACCESS_REMOTE_WRITE;
3392     if (mlx4_flags & MLX4_QP_BIT_RAE)
3393         ib_flags |= IB_ACCESS_REMOTE_ATOMIC;
3398 static void to_rdma_ah_attr(struct mlx4_ib_dev *ibdev,
3399                 struct rdma_ah_attr *ah_attr,
3400                 struct mlx4_qp_path *path)
3402     struct mlx4_dev *dev = ibdev->dev;
3403     u8 port_num = path->sched_queue & 0x40 ? 2 : 1;
3405     memset(ah_attr, 0, sizeof(*ah_attr));
3406     ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, port_num);
3407     if (port_num == 0 || port_num > dev->caps.num_ports)
3410     if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE)
3411         rdma_ah_set_sl(ah_attr, ((path->sched_queue >> 3) & 0x7) |
3412                    ((path->sched_queue & 4) << 1));
3414         rdma_ah_set_sl(ah_attr, (path->sched_queue >> 2) & 0xf);
3415     rdma_ah_set_port_num(ah_attr, port_num);
3417     rdma_ah_set_dlid(ah_attr, be16_to_cpu(path->rlid));
3418     rdma_ah_set_path_bits(ah_attr, path->grh_mylmc & 0x7f);
3419     rdma_ah_set_static_rate(ah_attr,
3420                 path->static_rate ? path->static_rate - 5 : 0);
3421     if (path->grh_mylmc & (1 << 7)) {
3422         rdma_ah_set_grh(ah_attr, NULL,
3423                 be32_to_cpu(path->tclass_flowlabel) & 0xfffff,
3426                 (be32_to_cpu(path->tclass_flowlabel)
3428         rdma_ah_set_dgid_raw(ah_attr, path->rgid);
3432 int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
3433              struct ib_qp_init_attr *qp_init_attr)
3435     struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
3436     struct mlx4_ib_qp *qp = to_mqp(ibqp);
3437     struct mlx4_qp_context context;
3441     mutex_lock(&qp->mutex);
3443     if (qp->state == IB_QPS_RESET) {
3444         qp_attr->qp_state = IB_QPS_RESET;
3448     err = mlx4_qp_query(dev->dev, &qp->mqp, &context);
3454     mlx4_state = be32_to_cpu(context.flags) >> 28;
3456     qp->state            = to_ib_qp_state(mlx4_state);
3457     qp_attr->qp_state    = qp->state;
3458     qp_attr->path_mtu    = context.mtu_msgmax >> 5;
3459     qp_attr->path_mig_state =
3460         to_ib_mig_state((be32_to_cpu(context.flags) >> 11) & 0x3);
3461     qp_attr->qkey        = be32_to_cpu(context.qkey);
3462     qp_attr->rq_psn      = be32_to_cpu(context.rnr_nextrecvpsn) & 0xffffff;
3463     qp_attr->sq_psn      = be32_to_cpu(context.next_send_psn) & 0xffffff;
3464     qp_attr->dest_qp_num = be32_to_cpu(context.remote_qpn) & 0xffffff;
3465     qp_attr->qp_access_flags =
3466         to_ib_qp_access_flags(be32_to_cpu(context.params2));
3468     if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
3469         to_rdma_ah_attr(dev, &qp_attr->ah_attr, &context.pri_path);
3470         to_rdma_ah_attr(dev, &qp_attr->alt_ah_attr, &context.alt_path);
3471         qp_attr->alt_pkey_index = context.alt_path.pkey_index & 0x7f;
3472         qp_attr->alt_port_num =
3473             rdma_ah_get_port_num(&qp_attr->alt_ah_attr);
3476     qp_attr->pkey_index = context.pri_path.pkey_index & 0x7f;
3477     if (qp_attr->qp_state == IB_QPS_INIT)
3478         qp_attr->port_num = qp->port;
3480         qp_attr->port_num = context.pri_path.sched_queue & 0x40 ? 2 : 1;
3482     /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
3483     qp_attr->sq_draining = mlx4_state == MLX4_QP_STATE_SQ_DRAINING;
3485     qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context.params1) >> 21) & 0x7);
3487     qp_attr->max_dest_rd_atomic =
3488         1 << ((be32_to_cpu(context.params2) >> 21) & 0x7);
3489     qp_attr->min_rnr_timer =
3490         (be32_to_cpu(context.rnr_nextrecvpsn) >> 24) & 0x1f;
3491     qp_attr->timeout     = context.pri_path.ackto >> 3;
3492     qp_attr->retry_cnt   = (be32_to_cpu(context.params1) >> 16) & 0x7;
3493     qp_attr->rnr_retry   = (be32_to_cpu(context.params1) >> 13) & 0x7;
3494     qp_attr->alt_timeout = context.alt_path.ackto >> 3;
3497     qp_attr->cur_qp_state        = qp_attr->qp_state;
3498     qp_attr->cap.max_recv_wr     = qp->rq.wqe_cnt;
3499     qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
3501     if (!ibqp->uobject) {
3502         qp_attr->cap.max_send_wr  = qp->sq.wqe_cnt;
3503         qp_attr->cap.max_send_sge = qp->sq.max_gs;
3505         qp_attr->cap.max_send_wr  = 0;
3506         qp_attr->cap.max_send_sge = 0;
3510      * We don't support inline sends for kernel QPs (yet), and we
3511      * don't know what userspace's value should be.
3513     qp_attr->cap.max_inline_data = 0;
3515     qp_init_attr->cap = qp_attr->cap;
3517     qp_init_attr->create_flags = 0;
3518     if (qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK)
3519         qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;
3521     if (qp->flags & MLX4_IB_QP_LSO)
3522         qp_init_attr->create_flags |= IB_QP_CREATE_IPOIB_UD_LSO;
3524     if (qp->flags & MLX4_IB_QP_NETIF)
3525         qp_init_attr->create_flags |= IB_QP_CREATE_NETIF_QP;
3527     qp_init_attr->sq_sig_type =
3528         qp->sq_signal_bits == cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) ?
3529         IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
3532     mutex_unlock(&qp->mutex);