/*
 * Copyright (c) 2016-2017 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #include <linux/acpi.h>
34 #include <linux/etherdevice.h>
35 #include <linux/interrupt.h>
36 #include <linux/kernel.h>
37 #include <linux/types.h>
38 #include <net/addrconf.h>
39 #include <rdma/ib_addr.h>
40 #include <rdma/ib_umem.h>
43 #include "hns_roce_common.h"
44 #include "hns_roce_device.h"
45 #include "hns_roce_cmd.h"
46 #include "hns_roce_hem.h"
47 #include "hns_roce_hw_v2.h"
49 static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg
*dseg
,
52 dseg
->lkey
= cpu_to_le32(sg
->lkey
);
53 dseg
->addr
= cpu_to_le64(sg
->addr
);
54 dseg
->len
= cpu_to_le32(sg
->length
);
57 static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe
*rc_sq_wqe
,
58 struct hns_roce_wqe_frmr_seg
*fseg
,
59 const struct ib_reg_wr
*wr
)
61 struct hns_roce_mr
*mr
= to_hr_mr(wr
->mr
);
63 /* use ib_access_flags */
64 roce_set_bit(rc_sq_wqe
->byte_4
,
65 V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S
,
66 wr
->access
& IB_ACCESS_MW_BIND
? 1 : 0);
67 roce_set_bit(rc_sq_wqe
->byte_4
,
68 V2_RC_FRMR_WQE_BYTE_4_ATOMIC_S
,
69 wr
->access
& IB_ACCESS_REMOTE_ATOMIC
? 1 : 0);
70 roce_set_bit(rc_sq_wqe
->byte_4
,
71 V2_RC_FRMR_WQE_BYTE_4_RR_S
,
72 wr
->access
& IB_ACCESS_REMOTE_READ
? 1 : 0);
73 roce_set_bit(rc_sq_wqe
->byte_4
,
74 V2_RC_FRMR_WQE_BYTE_4_RW_S
,
75 wr
->access
& IB_ACCESS_REMOTE_WRITE
? 1 : 0);
76 roce_set_bit(rc_sq_wqe
->byte_4
,
77 V2_RC_FRMR_WQE_BYTE_4_LW_S
,
78 wr
->access
& IB_ACCESS_LOCAL_WRITE
? 1 : 0);
80 /* Data structure reuse may lead to confusion */
81 rc_sq_wqe
->msg_len
= cpu_to_le32(mr
->pbl_ba
& 0xffffffff);
82 rc_sq_wqe
->inv_key
= cpu_to_le32(mr
->pbl_ba
>> 32);
84 rc_sq_wqe
->byte_16
= cpu_to_le32(wr
->mr
->length
& 0xffffffff);
85 rc_sq_wqe
->byte_20
= cpu_to_le32(wr
->mr
->length
>> 32);
86 rc_sq_wqe
->rkey
= cpu_to_le32(wr
->key
);
87 rc_sq_wqe
->va
= cpu_to_le64(wr
->mr
->iova
);
89 fseg
->pbl_size
= cpu_to_le32(mr
->pbl_size
);
90 roce_set_field(fseg
->mode_buf_pg_sz
,
91 V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M
,
92 V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S
,
93 mr
->pbl_buf_pg_sz
+ PG_SHIFT_OFFSET
);
94 roce_set_bit(fseg
->mode_buf_pg_sz
,
95 V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S
, 0);
98 static void set_atomic_seg(struct hns_roce_wqe_atomic_seg
*aseg
,
99 const struct ib_atomic_wr
*wr
)
101 if (wr
->wr
.opcode
== IB_WR_ATOMIC_CMP_AND_SWP
) {
102 aseg
->fetchadd_swap_data
= cpu_to_le64(wr
->swap
);
103 aseg
->cmp_data
= cpu_to_le64(wr
->compare_add
);
105 aseg
->fetchadd_swap_data
= cpu_to_le64(wr
->compare_add
);
110 static void set_extend_sge(struct hns_roce_qp
*qp
, const struct ib_send_wr
*wr
,
111 unsigned int *sge_ind
)
113 struct hns_roce_v2_wqe_data_seg
*dseg
;
122 if (qp
->ibqp
.qp_type
== IB_QPT_RC
|| qp
->ibqp
.qp_type
== IB_QPT_UC
)
123 num_in_wqe
= HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE
;
124 extend_sge_num
= wr
->num_sge
- num_in_wqe
;
125 sg
= wr
->sg_list
+ num_in_wqe
;
126 shift
= qp
->hr_buf
.page_shift
;
129 * Check whether wr->num_sge sges are in the same page. If not, we
130 * should calculate how many sges in the first page and the second
133 dseg
= get_send_extend_sge(qp
, (*sge_ind
) & (qp
->sge
.sge_cnt
- 1));
134 fi_sge_num
= (round_up((uintptr_t)dseg
, 1 << shift
) -
136 sizeof(struct hns_roce_v2_wqe_data_seg
);
137 if (extend_sge_num
> fi_sge_num
) {
138 se_sge_num
= extend_sge_num
- fi_sge_num
;
139 for (i
= 0; i
< fi_sge_num
; i
++) {
140 set_data_seg_v2(dseg
++, sg
+ i
);
143 dseg
= get_send_extend_sge(qp
,
144 (*sge_ind
) & (qp
->sge
.sge_cnt
- 1));
145 for (i
= 0; i
< se_sge_num
; i
++) {
146 set_data_seg_v2(dseg
++, sg
+ fi_sge_num
+ i
);
150 for (i
= 0; i
< extend_sge_num
; i
++) {
151 set_data_seg_v2(dseg
++, sg
+ i
);
157 static int set_rwqe_data_seg(struct ib_qp
*ibqp
, const struct ib_send_wr
*wr
,
158 struct hns_roce_v2_rc_send_wqe
*rc_sq_wqe
,
159 void *wqe
, unsigned int *sge_ind
,
160 const struct ib_send_wr
**bad_wr
)
162 struct hns_roce_dev
*hr_dev
= to_hr_dev(ibqp
->device
);
163 struct hns_roce_v2_wqe_data_seg
*dseg
= wqe
;
164 struct hns_roce_qp
*qp
= to_hr_qp(ibqp
);
167 if (wr
->send_flags
& IB_SEND_INLINE
&& wr
->num_sge
) {
168 if (le32_to_cpu(rc_sq_wqe
->msg_len
) >
169 hr_dev
->caps
.max_sq_inline
) {
171 dev_err(hr_dev
->dev
, "inline len(1-%d)=%d, illegal",
172 rc_sq_wqe
->msg_len
, hr_dev
->caps
.max_sq_inline
);
176 if (wr
->opcode
== IB_WR_RDMA_READ
) {
178 dev_err(hr_dev
->dev
, "Not support inline data!\n");
182 for (i
= 0; i
< wr
->num_sge
; i
++) {
183 memcpy(wqe
, ((void *)wr
->sg_list
[i
].addr
),
184 wr
->sg_list
[i
].length
);
185 wqe
+= wr
->sg_list
[i
].length
;
188 roce_set_bit(rc_sq_wqe
->byte_4
, V2_RC_SEND_WQE_BYTE_4_INLINE_S
,
191 if (wr
->num_sge
<= HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE
) {
192 for (i
= 0; i
< wr
->num_sge
; i
++) {
193 if (likely(wr
->sg_list
[i
].length
)) {
194 set_data_seg_v2(dseg
, wr
->sg_list
+ i
);
199 roce_set_field(rc_sq_wqe
->byte_20
,
200 V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M
,
201 V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S
,
202 (*sge_ind
) & (qp
->sge
.sge_cnt
- 1));
204 for (i
= 0; i
< HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE
; i
++) {
205 if (likely(wr
->sg_list
[i
].length
)) {
206 set_data_seg_v2(dseg
, wr
->sg_list
+ i
);
211 set_extend_sge(qp
, wr
, sge_ind
);
214 roce_set_field(rc_sq_wqe
->byte_16
,
215 V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M
,
216 V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S
, wr
->num_sge
);
222 static int hns_roce_v2_modify_qp(struct ib_qp
*ibqp
,
223 const struct ib_qp_attr
*attr
,
224 int attr_mask
, enum ib_qp_state cur_state
,
225 enum ib_qp_state new_state
);
227 static int hns_roce_v2_post_send(struct ib_qp
*ibqp
,
228 const struct ib_send_wr
*wr
,
229 const struct ib_send_wr
**bad_wr
)
231 struct hns_roce_dev
*hr_dev
= to_hr_dev(ibqp
->device
);
232 struct hns_roce_ah
*ah
= to_hr_ah(ud_wr(wr
)->ah
);
233 struct hns_roce_v2_ud_send_wqe
*ud_sq_wqe
;
234 struct hns_roce_v2_rc_send_wqe
*rc_sq_wqe
;
235 struct hns_roce_qp
*qp
= to_hr_qp(ibqp
);
236 struct hns_roce_wqe_frmr_seg
*fseg
;
237 struct device
*dev
= hr_dev
->dev
;
238 struct hns_roce_v2_db sq_db
;
239 struct ib_qp_attr attr
;
240 unsigned int sge_ind
= 0;
241 unsigned int owner_bit
;
254 if (unlikely(ibqp
->qp_type
!= IB_QPT_RC
&&
255 ibqp
->qp_type
!= IB_QPT_GSI
&&
256 ibqp
->qp_type
!= IB_QPT_UD
)) {
257 dev_err(dev
, "Not supported QP(0x%x)type!\n", ibqp
->qp_type
);
262 if (unlikely(qp
->state
== IB_QPS_RESET
|| qp
->state
== IB_QPS_INIT
||
263 qp
->state
== IB_QPS_RTR
)) {
264 dev_err(dev
, "Post WQE fail, QP state %d err!\n", qp
->state
);
269 spin_lock_irqsave(&qp
->sq
.lock
, flags
);
270 ind
= qp
->sq_next_wqe
;
271 sge_ind
= qp
->next_sge
;
273 for (nreq
= 0; wr
; ++nreq
, wr
= wr
->next
) {
274 if (hns_roce_wq_overflow(&qp
->sq
, nreq
, qp
->ibqp
.send_cq
)) {
280 if (unlikely(wr
->num_sge
> qp
->sq
.max_gs
)) {
281 dev_err(dev
, "num_sge=%d > qp->sq.max_gs=%d\n",
282 wr
->num_sge
, qp
->sq
.max_gs
);
288 wqe
= get_send_wqe(qp
, ind
& (qp
->sq
.wqe_cnt
- 1));
289 qp
->sq
.wrid
[(qp
->sq
.head
+ nreq
) & (qp
->sq
.wqe_cnt
- 1)] =
293 ~(((qp
->sq
.head
+ nreq
) >> ilog2(qp
->sq
.wqe_cnt
)) & 0x1);
296 /* Corresponding to the QP type, wqe process separately */
297 if (ibqp
->qp_type
== IB_QPT_GSI
) {
299 memset(ud_sq_wqe
, 0, sizeof(*ud_sq_wqe
));
301 roce_set_field(ud_sq_wqe
->dmac
, V2_UD_SEND_WQE_DMAC_0_M
,
302 V2_UD_SEND_WQE_DMAC_0_S
, ah
->av
.mac
[0]);
303 roce_set_field(ud_sq_wqe
->dmac
, V2_UD_SEND_WQE_DMAC_1_M
,
304 V2_UD_SEND_WQE_DMAC_1_S
, ah
->av
.mac
[1]);
305 roce_set_field(ud_sq_wqe
->dmac
, V2_UD_SEND_WQE_DMAC_2_M
,
306 V2_UD_SEND_WQE_DMAC_2_S
, ah
->av
.mac
[2]);
307 roce_set_field(ud_sq_wqe
->dmac
, V2_UD_SEND_WQE_DMAC_3_M
,
308 V2_UD_SEND_WQE_DMAC_3_S
, ah
->av
.mac
[3]);
309 roce_set_field(ud_sq_wqe
->byte_48
,
310 V2_UD_SEND_WQE_BYTE_48_DMAC_4_M
,
311 V2_UD_SEND_WQE_BYTE_48_DMAC_4_S
,
313 roce_set_field(ud_sq_wqe
->byte_48
,
314 V2_UD_SEND_WQE_BYTE_48_DMAC_5_M
,
315 V2_UD_SEND_WQE_BYTE_48_DMAC_5_S
,
319 smac
= (u8
*)hr_dev
->dev_addr
[qp
->port
];
320 loopback
= ether_addr_equal_unaligned(ah
->av
.mac
,
323 roce_set_bit(ud_sq_wqe
->byte_40
,
324 V2_UD_SEND_WQE_BYTE_40_LBI_S
, loopback
);
326 roce_set_field(ud_sq_wqe
->byte_4
,
327 V2_UD_SEND_WQE_BYTE_4_OPCODE_M
,
328 V2_UD_SEND_WQE_BYTE_4_OPCODE_S
,
329 HNS_ROCE_V2_WQE_OP_SEND
);
331 for (i
= 0; i
< wr
->num_sge
; i
++)
332 tmp_len
+= wr
->sg_list
[i
].length
;
335 cpu_to_le32(le32_to_cpu(ud_sq_wqe
->msg_len
) + tmp_len
);
337 switch (wr
->opcode
) {
338 case IB_WR_SEND_WITH_IMM
:
339 case IB_WR_RDMA_WRITE_WITH_IMM
:
340 ud_sq_wqe
->immtdata
=
341 cpu_to_le32(be32_to_cpu(wr
->ex
.imm_data
));
344 ud_sq_wqe
->immtdata
= 0;
349 roce_set_bit(ud_sq_wqe
->byte_4
,
350 V2_UD_SEND_WQE_BYTE_4_CQE_S
,
351 (wr
->send_flags
& IB_SEND_SIGNALED
) ? 1 : 0);
354 roce_set_bit(ud_sq_wqe
->byte_4
,
355 V2_UD_SEND_WQE_BYTE_4_SE_S
,
356 (wr
->send_flags
& IB_SEND_SOLICITED
) ? 1 : 0);
358 roce_set_bit(ud_sq_wqe
->byte_4
,
359 V2_UD_SEND_WQE_BYTE_4_OWNER_S
, owner_bit
);
361 roce_set_field(ud_sq_wqe
->byte_16
,
362 V2_UD_SEND_WQE_BYTE_16_PD_M
,
363 V2_UD_SEND_WQE_BYTE_16_PD_S
,
364 to_hr_pd(ibqp
->pd
)->pdn
);
366 roce_set_field(ud_sq_wqe
->byte_16
,
367 V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M
,
368 V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S
,
371 roce_set_field(ud_sq_wqe
->byte_20
,
372 V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M
,
373 V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S
,
374 sge_ind
& (qp
->sge
.sge_cnt
- 1));
376 roce_set_field(ud_sq_wqe
->byte_24
,
377 V2_UD_SEND_WQE_BYTE_24_UDPSPN_M
,
378 V2_UD_SEND_WQE_BYTE_24_UDPSPN_S
, 0);
380 cpu_to_le32(ud_wr(wr
)->remote_qkey
& 0x80000000 ?
381 qp
->qkey
: ud_wr(wr
)->remote_qkey
);
382 roce_set_field(ud_sq_wqe
->byte_32
,
383 V2_UD_SEND_WQE_BYTE_32_DQPN_M
,
384 V2_UD_SEND_WQE_BYTE_32_DQPN_S
,
385 ud_wr(wr
)->remote_qpn
);
387 roce_set_field(ud_sq_wqe
->byte_36
,
388 V2_UD_SEND_WQE_BYTE_36_VLAN_M
,
389 V2_UD_SEND_WQE_BYTE_36_VLAN_S
,
390 le16_to_cpu(ah
->av
.vlan
));
391 roce_set_field(ud_sq_wqe
->byte_36
,
392 V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M
,
393 V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S
,
395 roce_set_field(ud_sq_wqe
->byte_36
,
396 V2_UD_SEND_WQE_BYTE_36_TCLASS_M
,
397 V2_UD_SEND_WQE_BYTE_36_TCLASS_S
,
398 ah
->av
.sl_tclass_flowlabel
>>
399 HNS_ROCE_TCLASS_SHIFT
);
400 roce_set_field(ud_sq_wqe
->byte_40
,
401 V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M
,
402 V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S
,
403 ah
->av
.sl_tclass_flowlabel
&
404 HNS_ROCE_FLOW_LABEL_MASK
);
405 roce_set_field(ud_sq_wqe
->byte_40
,
406 V2_UD_SEND_WQE_BYTE_40_SL_M
,
407 V2_UD_SEND_WQE_BYTE_40_SL_S
,
408 le32_to_cpu(ah
->av
.sl_tclass_flowlabel
) >>
410 roce_set_field(ud_sq_wqe
->byte_40
,
411 V2_UD_SEND_WQE_BYTE_40_PORTN_M
,
412 V2_UD_SEND_WQE_BYTE_40_PORTN_S
,
415 roce_set_bit(ud_sq_wqe
->byte_40
,
416 V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S
,
417 ah
->av
.vlan_en
? 1 : 0);
418 roce_set_field(ud_sq_wqe
->byte_48
,
419 V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M
,
420 V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S
,
421 hns_get_gid_index(hr_dev
, qp
->phy_port
,
424 memcpy(&ud_sq_wqe
->dgid
[0], &ah
->av
.dgid
[0],
427 set_extend_sge(qp
, wr
, &sge_ind
);
429 } else if (ibqp
->qp_type
== IB_QPT_RC
) {
431 memset(rc_sq_wqe
, 0, sizeof(*rc_sq_wqe
));
432 for (i
= 0; i
< wr
->num_sge
; i
++)
433 tmp_len
+= wr
->sg_list
[i
].length
;
436 cpu_to_le32(le32_to_cpu(rc_sq_wqe
->msg_len
) + tmp_len
);
438 switch (wr
->opcode
) {
439 case IB_WR_SEND_WITH_IMM
:
440 case IB_WR_RDMA_WRITE_WITH_IMM
:
441 rc_sq_wqe
->immtdata
=
442 cpu_to_le32(be32_to_cpu(wr
->ex
.imm_data
));
444 case IB_WR_SEND_WITH_INV
:
446 cpu_to_le32(wr
->ex
.invalidate_rkey
);
449 rc_sq_wqe
->immtdata
= 0;
453 roce_set_bit(rc_sq_wqe
->byte_4
,
454 V2_RC_SEND_WQE_BYTE_4_FENCE_S
,
455 (wr
->send_flags
& IB_SEND_FENCE
) ? 1 : 0);
457 roce_set_bit(rc_sq_wqe
->byte_4
,
458 V2_RC_SEND_WQE_BYTE_4_SE_S
,
459 (wr
->send_flags
& IB_SEND_SOLICITED
) ? 1 : 0);
461 roce_set_bit(rc_sq_wqe
->byte_4
,
462 V2_RC_SEND_WQE_BYTE_4_CQE_S
,
463 (wr
->send_flags
& IB_SEND_SIGNALED
) ? 1 : 0);
465 roce_set_bit(rc_sq_wqe
->byte_4
,
466 V2_RC_SEND_WQE_BYTE_4_OWNER_S
, owner_bit
);
468 wqe
+= sizeof(struct hns_roce_v2_rc_send_wqe
);
469 switch (wr
->opcode
) {
470 case IB_WR_RDMA_READ
:
471 hr_op
= HNS_ROCE_V2_WQE_OP_RDMA_READ
;
473 cpu_to_le32(rdma_wr(wr
)->rkey
);
475 cpu_to_le64(rdma_wr(wr
)->remote_addr
);
477 case IB_WR_RDMA_WRITE
:
478 hr_op
= HNS_ROCE_V2_WQE_OP_RDMA_WRITE
;
480 cpu_to_le32(rdma_wr(wr
)->rkey
);
482 cpu_to_le64(rdma_wr(wr
)->remote_addr
);
484 case IB_WR_RDMA_WRITE_WITH_IMM
:
485 hr_op
= HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM
;
487 cpu_to_le32(rdma_wr(wr
)->rkey
);
489 cpu_to_le64(rdma_wr(wr
)->remote_addr
);
492 hr_op
= HNS_ROCE_V2_WQE_OP_SEND
;
494 case IB_WR_SEND_WITH_INV
:
495 hr_op
= HNS_ROCE_V2_WQE_OP_SEND_WITH_INV
;
497 case IB_WR_SEND_WITH_IMM
:
498 hr_op
= HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM
;
500 case IB_WR_LOCAL_INV
:
501 hr_op
= HNS_ROCE_V2_WQE_OP_LOCAL_INV
;
502 roce_set_bit(rc_sq_wqe
->byte_4
,
503 V2_RC_SEND_WQE_BYTE_4_SO_S
, 1);
505 cpu_to_le32(wr
->ex
.invalidate_rkey
);
508 hr_op
= HNS_ROCE_V2_WQE_OP_FAST_REG_PMR
;
510 set_frmr_seg(rc_sq_wqe
, fseg
, reg_wr(wr
));
512 case IB_WR_ATOMIC_CMP_AND_SWP
:
513 hr_op
= HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP
;
515 cpu_to_le32(atomic_wr(wr
)->rkey
);
517 cpu_to_le64(atomic_wr(wr
)->remote_addr
);
519 case IB_WR_ATOMIC_FETCH_AND_ADD
:
520 hr_op
= HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD
;
522 cpu_to_le32(atomic_wr(wr
)->rkey
);
524 cpu_to_le64(atomic_wr(wr
)->remote_addr
);
526 case IB_WR_MASKED_ATOMIC_CMP_AND_SWP
:
528 HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP
;
530 case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD
:
532 HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD
;
535 hr_op
= HNS_ROCE_V2_WQE_OP_MASK
;
539 roce_set_field(rc_sq_wqe
->byte_4
,
540 V2_RC_SEND_WQE_BYTE_4_OPCODE_M
,
541 V2_RC_SEND_WQE_BYTE_4_OPCODE_S
, hr_op
);
543 if (wr
->opcode
== IB_WR_ATOMIC_CMP_AND_SWP
||
544 wr
->opcode
== IB_WR_ATOMIC_FETCH_AND_ADD
) {
545 struct hns_roce_v2_wqe_data_seg
*dseg
;
548 set_data_seg_v2(dseg
, wr
->sg_list
);
549 wqe
+= sizeof(struct hns_roce_v2_wqe_data_seg
);
550 set_atomic_seg(wqe
, atomic_wr(wr
));
551 roce_set_field(rc_sq_wqe
->byte_16
,
552 V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M
,
553 V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S
,
555 } else if (wr
->opcode
!= IB_WR_REG_MR
) {
556 ret
= set_rwqe_data_seg(ibqp
, wr
, rc_sq_wqe
,
557 wqe
, &sge_ind
, bad_wr
);
564 dev_err(dev
, "Illegal qp_type(0x%x)\n", ibqp
->qp_type
);
565 spin_unlock_irqrestore(&qp
->sq
.lock
, flags
);
580 roce_set_field(sq_db
.byte_4
, V2_DB_BYTE_4_TAG_M
,
581 V2_DB_BYTE_4_TAG_S
, qp
->doorbell_qpn
);
582 roce_set_field(sq_db
.byte_4
, V2_DB_BYTE_4_CMD_M
,
583 V2_DB_BYTE_4_CMD_S
, HNS_ROCE_V2_SQ_DB
);
584 roce_set_field(sq_db
.parameter
, V2_DB_PARAMETER_IDX_M
,
585 V2_DB_PARAMETER_IDX_S
,
586 qp
->sq
.head
& ((qp
->sq
.wqe_cnt
<< 1) - 1));
587 roce_set_field(sq_db
.parameter
, V2_DB_PARAMETER_SL_M
,
588 V2_DB_PARAMETER_SL_S
, qp
->sl
);
590 hns_roce_write64_k((__le32
*)&sq_db
, qp
->sq
.db_reg_l
);
592 qp
->sq_next_wqe
= ind
;
593 qp
->next_sge
= sge_ind
;
595 if (qp
->state
== IB_QPS_ERR
) {
596 attr_mask
= IB_QP_STATE
;
597 attr
.qp_state
= IB_QPS_ERR
;
599 ret
= hns_roce_v2_modify_qp(&qp
->ibqp
, &attr
, attr_mask
,
600 qp
->state
, IB_QPS_ERR
);
602 spin_unlock_irqrestore(&qp
->sq
.lock
, flags
);
609 spin_unlock_irqrestore(&qp
->sq
.lock
, flags
);
614 static int hns_roce_v2_post_recv(struct ib_qp
*ibqp
,
615 const struct ib_recv_wr
*wr
,
616 const struct ib_recv_wr
**bad_wr
)
618 struct hns_roce_dev
*hr_dev
= to_hr_dev(ibqp
->device
);
619 struct hns_roce_qp
*hr_qp
= to_hr_qp(ibqp
);
620 struct hns_roce_v2_wqe_data_seg
*dseg
;
621 struct hns_roce_rinl_sge
*sge_list
;
622 struct device
*dev
= hr_dev
->dev
;
623 struct ib_qp_attr attr
;
632 spin_lock_irqsave(&hr_qp
->rq
.lock
, flags
);
633 ind
= hr_qp
->rq
.head
& (hr_qp
->rq
.wqe_cnt
- 1);
635 if (hr_qp
->state
== IB_QPS_RESET
) {
636 spin_unlock_irqrestore(&hr_qp
->rq
.lock
, flags
);
641 for (nreq
= 0; wr
; ++nreq
, wr
= wr
->next
) {
642 if (hns_roce_wq_overflow(&hr_qp
->rq
, nreq
,
643 hr_qp
->ibqp
.recv_cq
)) {
649 if (unlikely(wr
->num_sge
> hr_qp
->rq
.max_gs
)) {
650 dev_err(dev
, "rq:num_sge=%d > qp->sq.max_gs=%d\n",
651 wr
->num_sge
, hr_qp
->rq
.max_gs
);
657 wqe
= get_recv_wqe(hr_qp
, ind
);
658 dseg
= (struct hns_roce_v2_wqe_data_seg
*)wqe
;
659 for (i
= 0; i
< wr
->num_sge
; i
++) {
660 if (!wr
->sg_list
[i
].length
)
662 set_data_seg_v2(dseg
, wr
->sg_list
+ i
);
666 if (i
< hr_qp
->rq
.max_gs
) {
667 dseg
->lkey
= cpu_to_le32(HNS_ROCE_INVALID_LKEY
);
671 /* rq support inline data */
672 if (hr_dev
->caps
.flags
& HNS_ROCE_CAP_FLAG_RQ_INLINE
) {
673 sge_list
= hr_qp
->rq_inl_buf
.wqe_list
[ind
].sg_list
;
674 hr_qp
->rq_inl_buf
.wqe_list
[ind
].sge_cnt
=
676 for (i
= 0; i
< wr
->num_sge
; i
++) {
678 (void *)(u64
)wr
->sg_list
[i
].addr
;
679 sge_list
[i
].len
= wr
->sg_list
[i
].length
;
683 hr_qp
->rq
.wrid
[ind
] = wr
->wr_id
;
685 ind
= (ind
+ 1) & (hr_qp
->rq
.wqe_cnt
- 1);
690 hr_qp
->rq
.head
+= nreq
;
694 *hr_qp
->rdb
.db_record
= hr_qp
->rq
.head
& 0xffff;
696 if (hr_qp
->state
== IB_QPS_ERR
) {
697 attr_mask
= IB_QP_STATE
;
698 attr
.qp_state
= IB_QPS_ERR
;
700 ret
= hns_roce_v2_modify_qp(&hr_qp
->ibqp
, &attr
,
701 attr_mask
, hr_qp
->state
,
704 spin_unlock_irqrestore(&hr_qp
->rq
.lock
, flags
);
710 spin_unlock_irqrestore(&hr_qp
->rq
.lock
, flags
);
715 static int hns_roce_cmq_space(struct hns_roce_v2_cmq_ring
*ring
)
717 int ntu
= ring
->next_to_use
;
718 int ntc
= ring
->next_to_clean
;
719 int used
= (ntu
- ntc
+ ring
->desc_num
) % ring
->desc_num
;
721 return ring
->desc_num
- used
- 1;
724 static int hns_roce_alloc_cmq_desc(struct hns_roce_dev
*hr_dev
,
725 struct hns_roce_v2_cmq_ring
*ring
)
727 int size
= ring
->desc_num
* sizeof(struct hns_roce_cmq_desc
);
729 ring
->desc
= kzalloc(size
, GFP_KERNEL
);
733 ring
->desc_dma_addr
= dma_map_single(hr_dev
->dev
, ring
->desc
, size
,
735 if (dma_mapping_error(hr_dev
->dev
, ring
->desc_dma_addr
)) {
736 ring
->desc_dma_addr
= 0;
745 static void hns_roce_free_cmq_desc(struct hns_roce_dev
*hr_dev
,
746 struct hns_roce_v2_cmq_ring
*ring
)
748 dma_unmap_single(hr_dev
->dev
, ring
->desc_dma_addr
,
749 ring
->desc_num
* sizeof(struct hns_roce_cmq_desc
),
752 ring
->desc_dma_addr
= 0;
756 static int hns_roce_init_cmq_ring(struct hns_roce_dev
*hr_dev
, bool ring_type
)
758 struct hns_roce_v2_priv
*priv
= (struct hns_roce_v2_priv
*)hr_dev
->priv
;
759 struct hns_roce_v2_cmq_ring
*ring
= (ring_type
== TYPE_CSQ
) ?
760 &priv
->cmq
.csq
: &priv
->cmq
.crq
;
762 ring
->flag
= ring_type
;
763 ring
->next_to_clean
= 0;
764 ring
->next_to_use
= 0;
766 return hns_roce_alloc_cmq_desc(hr_dev
, ring
);
769 static void hns_roce_cmq_init_regs(struct hns_roce_dev
*hr_dev
, bool ring_type
)
771 struct hns_roce_v2_priv
*priv
= (struct hns_roce_v2_priv
*)hr_dev
->priv
;
772 struct hns_roce_v2_cmq_ring
*ring
= (ring_type
== TYPE_CSQ
) ?
773 &priv
->cmq
.csq
: &priv
->cmq
.crq
;
774 dma_addr_t dma
= ring
->desc_dma_addr
;
776 if (ring_type
== TYPE_CSQ
) {
777 roce_write(hr_dev
, ROCEE_TX_CMQ_BASEADDR_L_REG
, (u32
)dma
);
778 roce_write(hr_dev
, ROCEE_TX_CMQ_BASEADDR_H_REG
,
780 roce_write(hr_dev
, ROCEE_TX_CMQ_DEPTH_REG
,
781 (ring
->desc_num
>> HNS_ROCE_CMQ_DESC_NUM_S
) |
782 HNS_ROCE_CMQ_ENABLE
);
783 roce_write(hr_dev
, ROCEE_TX_CMQ_HEAD_REG
, 0);
784 roce_write(hr_dev
, ROCEE_TX_CMQ_TAIL_REG
, 0);
786 roce_write(hr_dev
, ROCEE_RX_CMQ_BASEADDR_L_REG
, (u32
)dma
);
787 roce_write(hr_dev
, ROCEE_RX_CMQ_BASEADDR_H_REG
,
789 roce_write(hr_dev
, ROCEE_RX_CMQ_DEPTH_REG
,
790 (ring
->desc_num
>> HNS_ROCE_CMQ_DESC_NUM_S
) |
791 HNS_ROCE_CMQ_ENABLE
);
792 roce_write(hr_dev
, ROCEE_RX_CMQ_HEAD_REG
, 0);
793 roce_write(hr_dev
, ROCEE_RX_CMQ_TAIL_REG
, 0);
797 static int hns_roce_v2_cmq_init(struct hns_roce_dev
*hr_dev
)
799 struct hns_roce_v2_priv
*priv
= (struct hns_roce_v2_priv
*)hr_dev
->priv
;
802 /* Setup the queue entries for command queue */
803 priv
->cmq
.csq
.desc_num
= CMD_CSQ_DESC_NUM
;
804 priv
->cmq
.crq
.desc_num
= CMD_CRQ_DESC_NUM
;
806 /* Setup the lock for command queue */
807 spin_lock_init(&priv
->cmq
.csq
.lock
);
808 spin_lock_init(&priv
->cmq
.crq
.lock
);
810 /* Setup Tx write back timeout */
811 priv
->cmq
.tx_timeout
= HNS_ROCE_CMQ_TX_TIMEOUT
;
814 ret
= hns_roce_init_cmq_ring(hr_dev
, TYPE_CSQ
);
816 dev_err(hr_dev
->dev
, "Init CSQ error, ret = %d.\n", ret
);
821 ret
= hns_roce_init_cmq_ring(hr_dev
, TYPE_CRQ
);
823 dev_err(hr_dev
->dev
, "Init CRQ error, ret = %d.\n", ret
);
828 hns_roce_cmq_init_regs(hr_dev
, TYPE_CSQ
);
831 hns_roce_cmq_init_regs(hr_dev
, TYPE_CRQ
);
836 hns_roce_free_cmq_desc(hr_dev
, &priv
->cmq
.csq
);
841 static void hns_roce_v2_cmq_exit(struct hns_roce_dev
*hr_dev
)
843 struct hns_roce_v2_priv
*priv
= (struct hns_roce_v2_priv
*)hr_dev
->priv
;
845 hns_roce_free_cmq_desc(hr_dev
, &priv
->cmq
.csq
);
846 hns_roce_free_cmq_desc(hr_dev
, &priv
->cmq
.crq
);
849 static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc
*desc
,
850 enum hns_roce_opcode_type opcode
,
853 memset((void *)desc
, 0, sizeof(struct hns_roce_cmq_desc
));
854 desc
->opcode
= cpu_to_le16(opcode
);
856 cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR
| HNS_ROCE_CMD_FLAG_IN
);
858 desc
->flag
|= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR
);
860 desc
->flag
&= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR
);
863 static int hns_roce_cmq_csq_done(struct hns_roce_dev
*hr_dev
)
865 struct hns_roce_v2_priv
*priv
= (struct hns_roce_v2_priv
*)hr_dev
->priv
;
866 u32 head
= roce_read(hr_dev
, ROCEE_TX_CMQ_HEAD_REG
);
868 return head
== priv
->cmq
.csq
.next_to_use
;
871 static int hns_roce_cmq_csq_clean(struct hns_roce_dev
*hr_dev
)
873 struct hns_roce_v2_priv
*priv
= (struct hns_roce_v2_priv
*)hr_dev
->priv
;
874 struct hns_roce_v2_cmq_ring
*csq
= &priv
->cmq
.csq
;
875 struct hns_roce_cmq_desc
*desc
;
876 u16 ntc
= csq
->next_to_clean
;
880 desc
= &csq
->desc
[ntc
];
881 head
= roce_read(hr_dev
, ROCEE_TX_CMQ_HEAD_REG
);
882 while (head
!= ntc
) {
883 memset(desc
, 0, sizeof(*desc
));
885 if (ntc
== csq
->desc_num
)
887 desc
= &csq
->desc
[ntc
];
890 csq
->next_to_clean
= ntc
;
895 static int hns_roce_cmq_send(struct hns_roce_dev
*hr_dev
,
896 struct hns_roce_cmq_desc
*desc
, int num
)
898 struct hns_roce_v2_priv
*priv
= (struct hns_roce_v2_priv
*)hr_dev
->priv
;
899 struct hns_roce_v2_cmq_ring
*csq
= &priv
->cmq
.csq
;
900 struct hns_roce_cmq_desc
*desc_to_use
;
901 bool complete
= false;
908 if (hr_dev
->is_reset
)
911 spin_lock_bh(&csq
->lock
);
913 if (num
> hns_roce_cmq_space(csq
)) {
914 spin_unlock_bh(&csq
->lock
);
919 * Record the location of desc in the cmq for this time
920 * which will be use for hardware to write back
922 ntc
= csq
->next_to_use
;
924 while (handle
< num
) {
925 desc_to_use
= &csq
->desc
[csq
->next_to_use
];
926 *desc_to_use
= desc
[handle
];
927 dev_dbg(hr_dev
->dev
, "set cmq desc:\n");
929 if (csq
->next_to_use
== csq
->desc_num
)
930 csq
->next_to_use
= 0;
934 /* Write to hardware */
935 roce_write(hr_dev
, ROCEE_TX_CMQ_TAIL_REG
, csq
->next_to_use
);
938 * If the command is sync, wait for the firmware to write back,
939 * if multi descriptors to be sent, use the first one to check
941 if ((desc
->flag
) & HNS_ROCE_CMD_FLAG_NO_INTR
) {
943 if (hns_roce_cmq_csq_done(hr_dev
))
947 } while (timeout
< priv
->cmq
.tx_timeout
);
950 if (hns_roce_cmq_csq_done(hr_dev
)) {
953 while (handle
< num
) {
954 /* get the result of hardware write back */
955 desc_to_use
= &csq
->desc
[ntc
];
956 desc
[handle
] = *desc_to_use
;
957 dev_dbg(hr_dev
->dev
, "Get cmq desc:\n");
958 desc_ret
= desc
[handle
].retval
;
959 if (desc_ret
== CMD_EXEC_SUCCESS
)
963 priv
->cmq
.last_status
= desc_ret
;
966 if (ntc
== csq
->desc_num
)
974 /* clean the command send queue */
975 handle
= hns_roce_cmq_csq_clean(hr_dev
);
977 dev_warn(hr_dev
->dev
, "Cleaned %d, need to clean %d\n",
980 spin_unlock_bh(&csq
->lock
);
985 static int hns_roce_cmq_query_hw_info(struct hns_roce_dev
*hr_dev
)
987 struct hns_roce_query_version
*resp
;
988 struct hns_roce_cmq_desc desc
;
991 hns_roce_cmq_setup_basic_desc(&desc
, HNS_ROCE_OPC_QUERY_HW_VER
, true);
992 ret
= hns_roce_cmq_send(hr_dev
, &desc
, 1);
996 resp
= (struct hns_roce_query_version
*)desc
.data
;
997 hr_dev
->hw_rev
= le32_to_cpu(resp
->rocee_hw_version
);
998 hr_dev
->vendor_id
= hr_dev
->pci_dev
->vendor
;
1003 static int hns_roce_query_fw_ver(struct hns_roce_dev
*hr_dev
)
1005 struct hns_roce_query_fw_info
*resp
;
1006 struct hns_roce_cmq_desc desc
;
1009 hns_roce_cmq_setup_basic_desc(&desc
, HNS_QUERY_FW_VER
, true);
1010 ret
= hns_roce_cmq_send(hr_dev
, &desc
, 1);
1014 resp
= (struct hns_roce_query_fw_info
*)desc
.data
;
1015 hr_dev
->caps
.fw_ver
= (u64
)(le32_to_cpu(resp
->fw_ver
));
1020 static int hns_roce_config_global_param(struct hns_roce_dev
*hr_dev
)
1022 struct hns_roce_cfg_global_param
*req
;
1023 struct hns_roce_cmq_desc desc
;
1025 hns_roce_cmq_setup_basic_desc(&desc
, HNS_ROCE_OPC_CFG_GLOBAL_PARAM
,
1028 req
= (struct hns_roce_cfg_global_param
*)desc
.data
;
1029 memset(req
, 0, sizeof(*req
));
1030 roce_set_field(req
->time_cfg_udp_port
,
1031 CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_M
,
1032 CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_S
, 0x3e8);
1033 roce_set_field(req
->time_cfg_udp_port
,
1034 CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M
,
1035 CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S
, 0x12b7);
1037 return hns_roce_cmq_send(hr_dev
, &desc
, 1);
1040 static int hns_roce_query_pf_resource(struct hns_roce_dev
*hr_dev
)
1042 struct hns_roce_cmq_desc desc
[2];
1043 struct hns_roce_pf_res_a
*req_a
;
1044 struct hns_roce_pf_res_b
*req_b
;
1048 for (i
= 0; i
< 2; i
++) {
1049 hns_roce_cmq_setup_basic_desc(&desc
[i
],
1050 HNS_ROCE_OPC_QUERY_PF_RES
, true);
1053 desc
[i
].flag
|= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT
);
1055 desc
[i
].flag
&= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT
);
1058 ret
= hns_roce_cmq_send(hr_dev
, desc
, 2);
1062 req_a
= (struct hns_roce_pf_res_a
*)desc
[0].data
;
1063 req_b
= (struct hns_roce_pf_res_b
*)desc
[1].data
;
1065 hr_dev
->caps
.qpc_bt_num
= roce_get_field(req_a
->qpc_bt_idx_num
,
1066 PF_RES_DATA_1_PF_QPC_BT_NUM_M
,
1067 PF_RES_DATA_1_PF_QPC_BT_NUM_S
);
1068 hr_dev
->caps
.srqc_bt_num
= roce_get_field(req_a
->srqc_bt_idx_num
,
1069 PF_RES_DATA_2_PF_SRQC_BT_NUM_M
,
1070 PF_RES_DATA_2_PF_SRQC_BT_NUM_S
);
1071 hr_dev
->caps
.cqc_bt_num
= roce_get_field(req_a
->cqc_bt_idx_num
,
1072 PF_RES_DATA_3_PF_CQC_BT_NUM_M
,
1073 PF_RES_DATA_3_PF_CQC_BT_NUM_S
);
1074 hr_dev
->caps
.mpt_bt_num
= roce_get_field(req_a
->mpt_bt_idx_num
,
1075 PF_RES_DATA_4_PF_MPT_BT_NUM_M
,
1076 PF_RES_DATA_4_PF_MPT_BT_NUM_S
);
1078 hr_dev
->caps
.sl_num
= roce_get_field(req_b
->qid_idx_sl_num
,
1079 PF_RES_DATA_3_PF_SL_NUM_M
,
1080 PF_RES_DATA_3_PF_SL_NUM_S
);
1085 static int hns_roce_set_vf_switch_param(struct hns_roce_dev
*hr_dev
,
1088 struct hns_roce_cmq_desc desc
;
1089 struct hns_roce_vf_switch
*swt
;
1092 swt
= (struct hns_roce_vf_switch
*)desc
.data
;
1093 hns_roce_cmq_setup_basic_desc(&desc
, HNS_SWITCH_PARAMETER_CFG
, true);
1094 swt
->rocee_sel
|= cpu_to_le16(HNS_ICL_SWITCH_CMD_ROCEE_SEL
);
1095 roce_set_field(swt
->fun_id
,
1096 VF_SWITCH_DATA_FUN_ID_VF_ID_M
,
1097 VF_SWITCH_DATA_FUN_ID_VF_ID_S
,
1099 ret
= hns_roce_cmq_send(hr_dev
, &desc
, 1);
1103 cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR
| HNS_ROCE_CMD_FLAG_IN
);
1104 desc
.flag
&= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR
);
1105 roce_set_bit(swt
->cfg
, VF_SWITCH_DATA_CFG_ALW_LPBK_S
, 1);
1106 roce_set_bit(swt
->cfg
, VF_SWITCH_DATA_CFG_ALW_LCL_LPBK_S
, 1);
1107 roce_set_bit(swt
->cfg
, VF_SWITCH_DATA_CFG_ALW_DST_OVRD_S
, 1);
1109 return hns_roce_cmq_send(hr_dev
, &desc
, 1);
1112 static int hns_roce_alloc_vf_resource(struct hns_roce_dev
*hr_dev
)
1114 struct hns_roce_cmq_desc desc
[2];
1115 struct hns_roce_vf_res_a
*req_a
;
1116 struct hns_roce_vf_res_b
*req_b
;
1119 req_a
= (struct hns_roce_vf_res_a
*)desc
[0].data
;
1120 req_b
= (struct hns_roce_vf_res_b
*)desc
[1].data
;
1121 memset(req_a
, 0, sizeof(*req_a
));
1122 memset(req_b
, 0, sizeof(*req_b
));
1123 for (i
= 0; i
< 2; i
++) {
1124 hns_roce_cmq_setup_basic_desc(&desc
[i
],
1125 HNS_ROCE_OPC_ALLOC_VF_RES
, false);
1128 desc
[i
].flag
|= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT
);
1130 desc
[i
].flag
&= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT
);
1133 roce_set_field(req_a
->vf_qpc_bt_idx_num
,
1134 VF_RES_A_DATA_1_VF_QPC_BT_IDX_M
,
1135 VF_RES_A_DATA_1_VF_QPC_BT_IDX_S
, 0);
1136 roce_set_field(req_a
->vf_qpc_bt_idx_num
,
1137 VF_RES_A_DATA_1_VF_QPC_BT_NUM_M
,
1138 VF_RES_A_DATA_1_VF_QPC_BT_NUM_S
,
1139 HNS_ROCE_VF_QPC_BT_NUM
);
1141 roce_set_field(req_a
->vf_srqc_bt_idx_num
,
1142 VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M
,
1143 VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S
, 0);
1144 roce_set_field(req_a
->vf_srqc_bt_idx_num
,
1145 VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M
,
1146 VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S
,
1147 HNS_ROCE_VF_SRQC_BT_NUM
);
1149 roce_set_field(req_a
->vf_cqc_bt_idx_num
,
1150 VF_RES_A_DATA_3_VF_CQC_BT_IDX_M
,
1151 VF_RES_A_DATA_3_VF_CQC_BT_IDX_S
, 0);
1152 roce_set_field(req_a
->vf_cqc_bt_idx_num
,
1153 VF_RES_A_DATA_3_VF_CQC_BT_NUM_M
,
1154 VF_RES_A_DATA_3_VF_CQC_BT_NUM_S
,
1155 HNS_ROCE_VF_CQC_BT_NUM
);
1157 roce_set_field(req_a
->vf_mpt_bt_idx_num
,
1158 VF_RES_A_DATA_4_VF_MPT_BT_IDX_M
,
1159 VF_RES_A_DATA_4_VF_MPT_BT_IDX_S
, 0);
1160 roce_set_field(req_a
->vf_mpt_bt_idx_num
,
1161 VF_RES_A_DATA_4_VF_MPT_BT_NUM_M
,
1162 VF_RES_A_DATA_4_VF_MPT_BT_NUM_S
,
1163 HNS_ROCE_VF_MPT_BT_NUM
);
1165 roce_set_field(req_a
->vf_eqc_bt_idx_num
,
1166 VF_RES_A_DATA_5_VF_EQC_IDX_M
,
1167 VF_RES_A_DATA_5_VF_EQC_IDX_S
, 0);
1168 roce_set_field(req_a
->vf_eqc_bt_idx_num
,
1169 VF_RES_A_DATA_5_VF_EQC_NUM_M
,
1170 VF_RES_A_DATA_5_VF_EQC_NUM_S
,
1171 HNS_ROCE_VF_EQC_NUM
);
1173 roce_set_field(req_b
->vf_smac_idx_num
,
1174 VF_RES_B_DATA_1_VF_SMAC_IDX_M
,
1175 VF_RES_B_DATA_1_VF_SMAC_IDX_S
, 0);
1176 roce_set_field(req_b
->vf_smac_idx_num
,
1177 VF_RES_B_DATA_1_VF_SMAC_NUM_M
,
1178 VF_RES_B_DATA_1_VF_SMAC_NUM_S
,
1179 HNS_ROCE_VF_SMAC_NUM
);
1181 roce_set_field(req_b
->vf_sgid_idx_num
,
1182 VF_RES_B_DATA_2_VF_SGID_IDX_M
,
1183 VF_RES_B_DATA_2_VF_SGID_IDX_S
, 0);
1184 roce_set_field(req_b
->vf_sgid_idx_num
,
1185 VF_RES_B_DATA_2_VF_SGID_NUM_M
,
1186 VF_RES_B_DATA_2_VF_SGID_NUM_S
,
1187 HNS_ROCE_VF_SGID_NUM
);
1189 roce_set_field(req_b
->vf_qid_idx_sl_num
,
1190 VF_RES_B_DATA_3_VF_QID_IDX_M
,
1191 VF_RES_B_DATA_3_VF_QID_IDX_S
, 0);
1192 roce_set_field(req_b
->vf_qid_idx_sl_num
,
1193 VF_RES_B_DATA_3_VF_SL_NUM_M
,
1194 VF_RES_B_DATA_3_VF_SL_NUM_S
,
1195 HNS_ROCE_VF_SL_NUM
);
1199 return hns_roce_cmq_send(hr_dev
, desc
, 2);
1202 static int hns_roce_v2_set_bt(struct hns_roce_dev
*hr_dev
)
1204 u8 srqc_hop_num
= hr_dev
->caps
.srqc_hop_num
;
1205 u8 qpc_hop_num
= hr_dev
->caps
.qpc_hop_num
;
1206 u8 cqc_hop_num
= hr_dev
->caps
.cqc_hop_num
;
1207 u8 mpt_hop_num
= hr_dev
->caps
.mpt_hop_num
;
1208 struct hns_roce_cfg_bt_attr
*req
;
1209 struct hns_roce_cmq_desc desc
;
1211 hns_roce_cmq_setup_basic_desc(&desc
, HNS_ROCE_OPC_CFG_BT_ATTR
, false);
1212 req
= (struct hns_roce_cfg_bt_attr
*)desc
.data
;
1213 memset(req
, 0, sizeof(*req
));
1215 roce_set_field(req
->vf_qpc_cfg
, CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_M
,
1216 CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S
,
1217 hr_dev
->caps
.qpc_ba_pg_sz
+ PG_SHIFT_OFFSET
);
1218 roce_set_field(req
->vf_qpc_cfg
, CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_M
,
1219 CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_S
,
1220 hr_dev
->caps
.qpc_buf_pg_sz
+ PG_SHIFT_OFFSET
);
1221 roce_set_field(req
->vf_qpc_cfg
, CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_M
,
1222 CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_S
,
1223 qpc_hop_num
== HNS_ROCE_HOP_NUM_0
? 0 : qpc_hop_num
);
1225 roce_set_field(req
->vf_srqc_cfg
, CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_M
,
1226 CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_S
,
1227 hr_dev
->caps
.srqc_ba_pg_sz
+ PG_SHIFT_OFFSET
);
1228 roce_set_field(req
->vf_srqc_cfg
, CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_M
,
1229 CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_S
,
1230 hr_dev
->caps
.srqc_buf_pg_sz
+ PG_SHIFT_OFFSET
);
1231 roce_set_field(req
->vf_srqc_cfg
, CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_M
,
1232 CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_S
,
1233 srqc_hop_num
== HNS_ROCE_HOP_NUM_0
? 0 : srqc_hop_num
);
1235 roce_set_field(req
->vf_cqc_cfg
, CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_M
,
1236 CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_S
,
1237 hr_dev
->caps
.cqc_ba_pg_sz
+ PG_SHIFT_OFFSET
);
1238 roce_set_field(req
->vf_cqc_cfg
, CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_M
,
1239 CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_S
,
1240 hr_dev
->caps
.cqc_buf_pg_sz
+ PG_SHIFT_OFFSET
);
1241 roce_set_field(req
->vf_cqc_cfg
, CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_M
,
1242 CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_S
,
1243 cqc_hop_num
== HNS_ROCE_HOP_NUM_0
? 0 : cqc_hop_num
);
1245 roce_set_field(req
->vf_mpt_cfg
, CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_M
,
1246 CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_S
,
1247 hr_dev
->caps
.mpt_ba_pg_sz
+ PG_SHIFT_OFFSET
);
1248 roce_set_field(req
->vf_mpt_cfg
, CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_M
,
1249 CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_S
,
1250 hr_dev
->caps
.mpt_buf_pg_sz
+ PG_SHIFT_OFFSET
);
1251 roce_set_field(req
->vf_mpt_cfg
, CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M
,
1252 CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S
,
1253 mpt_hop_num
== HNS_ROCE_HOP_NUM_0
? 0 : mpt_hop_num
);
1255 return hns_roce_cmq_send(hr_dev
, &desc
, 1);
1258 static int hns_roce_v2_profile(struct hns_roce_dev
*hr_dev
)
1260 struct hns_roce_caps
*caps
= &hr_dev
->caps
;
1263 ret
= hns_roce_cmq_query_hw_info(hr_dev
);
1265 dev_err(hr_dev
->dev
, "Query hardware version fail, ret = %d.\n",
1270 ret
= hns_roce_query_fw_ver(hr_dev
);
1272 dev_err(hr_dev
->dev
, "Query firmware version fail, ret = %d.\n",
1277 ret
= hns_roce_config_global_param(hr_dev
);
1279 dev_err(hr_dev
->dev
, "Configure global param fail, ret = %d.\n",
1284 /* Get pf resource owned by every pf */
1285 ret
= hns_roce_query_pf_resource(hr_dev
);
1287 dev_err(hr_dev
->dev
, "Query pf resource fail, ret = %d.\n",
1292 ret
= hns_roce_alloc_vf_resource(hr_dev
);
1294 dev_err(hr_dev
->dev
, "Allocate vf resource fail, ret = %d.\n",
1299 if (hr_dev
->pci_dev
->revision
== 0x21) {
1300 ret
= hns_roce_set_vf_switch_param(hr_dev
, 0);
1302 dev_err(hr_dev
->dev
,
1303 "Set function switch param fail, ret = %d.\n",
1309 hr_dev
->vendor_part_id
= hr_dev
->pci_dev
->device
;
1310 hr_dev
->sys_image_guid
= be64_to_cpu(hr_dev
->ib_dev
.node_guid
);
1312 caps
->num_qps
= HNS_ROCE_V2_MAX_QP_NUM
;
1313 caps
->max_wqes
= HNS_ROCE_V2_MAX_WQE_NUM
;
1314 caps
->num_cqs
= HNS_ROCE_V2_MAX_CQ_NUM
;
1315 caps
->num_srqs
= HNS_ROCE_V2_MAX_SRQ_NUM
;
1316 caps
->max_cqes
= HNS_ROCE_V2_MAX_CQE_NUM
;
1317 caps
->max_srqwqes
= HNS_ROCE_V2_MAX_SRQWQE_NUM
;
1318 caps
->max_sq_sg
= HNS_ROCE_V2_MAX_SQ_SGE_NUM
;
1319 caps
->max_extend_sg
= HNS_ROCE_V2_MAX_EXTEND_SGE_NUM
;
1320 caps
->max_rq_sg
= HNS_ROCE_V2_MAX_RQ_SGE_NUM
;
1321 caps
->max_sq_inline
= HNS_ROCE_V2_MAX_SQ_INLINE
;
1322 caps
->max_srq_sg
= HNS_ROCE_V2_MAX_SRQ_SGE_NUM
;
1323 caps
->num_uars
= HNS_ROCE_V2_UAR_NUM
;
1324 caps
->phy_num_uars
= HNS_ROCE_V2_PHY_UAR_NUM
;
1325 caps
->num_aeq_vectors
= HNS_ROCE_V2_AEQE_VEC_NUM
;
1326 caps
->num_comp_vectors
= HNS_ROCE_V2_COMP_VEC_NUM
;
1327 caps
->num_other_vectors
= HNS_ROCE_V2_ABNORMAL_VEC_NUM
;
1328 caps
->num_mtpts
= HNS_ROCE_V2_MAX_MTPT_NUM
;
1329 caps
->num_mtt_segs
= HNS_ROCE_V2_MAX_MTT_SEGS
;
1330 caps
->num_cqe_segs
= HNS_ROCE_V2_MAX_CQE_SEGS
;
1331 caps
->num_srqwqe_segs
= HNS_ROCE_V2_MAX_SRQWQE_SEGS
;
1332 caps
->num_idx_segs
= HNS_ROCE_V2_MAX_IDX_SEGS
;
1333 caps
->num_pds
= HNS_ROCE_V2_MAX_PD_NUM
;
1334 caps
->max_qp_init_rdma
= HNS_ROCE_V2_MAX_QP_INIT_RDMA
;
1335 caps
->max_qp_dest_rdma
= HNS_ROCE_V2_MAX_QP_DEST_RDMA
;
1336 caps
->max_sq_desc_sz
= HNS_ROCE_V2_MAX_SQ_DESC_SZ
;
1337 caps
->max_rq_desc_sz
= HNS_ROCE_V2_MAX_RQ_DESC_SZ
;
1338 caps
->max_srq_desc_sz
= HNS_ROCE_V2_MAX_SRQ_DESC_SZ
;
1339 caps
->qpc_entry_sz
= HNS_ROCE_V2_QPC_ENTRY_SZ
;
1340 caps
->irrl_entry_sz
= HNS_ROCE_V2_IRRL_ENTRY_SZ
;
1341 caps
->trrl_entry_sz
= HNS_ROCE_V2_TRRL_ENTRY_SZ
;
1342 caps
->cqc_entry_sz
= HNS_ROCE_V2_CQC_ENTRY_SZ
;
1343 caps
->srqc_entry_sz
= HNS_ROCE_V2_SRQC_ENTRY_SZ
;
1344 caps
->mtpt_entry_sz
= HNS_ROCE_V2_MTPT_ENTRY_SZ
;
1345 caps
->mtt_entry_sz
= HNS_ROCE_V2_MTT_ENTRY_SZ
;
1346 caps
->idx_entry_sz
= 4;
1347 caps
->cq_entry_sz
= HNS_ROCE_V2_CQE_ENTRY_SIZE
;
1348 caps
->page_size_cap
= HNS_ROCE_V2_PAGE_SIZE_SUPPORTED
;
1349 caps
->reserved_lkey
= 0;
1350 caps
->reserved_pds
= 0;
1351 caps
->reserved_mrws
= 1;
1352 caps
->reserved_uars
= 0;
1353 caps
->reserved_cqs
= 0;
1354 caps
->reserved_srqs
= 0;
1355 caps
->reserved_qps
= HNS_ROCE_V2_RSV_QPS
;
1357 caps
->qpc_ba_pg_sz
= 0;
1358 caps
->qpc_buf_pg_sz
= 0;
1359 caps
->qpc_hop_num
= HNS_ROCE_CONTEXT_HOP_NUM
;
1360 caps
->srqc_ba_pg_sz
= 0;
1361 caps
->srqc_buf_pg_sz
= 0;
1362 caps
->srqc_hop_num
= HNS_ROCE_HOP_NUM_0
;
1363 caps
->cqc_ba_pg_sz
= 0;
1364 caps
->cqc_buf_pg_sz
= 0;
1365 caps
->cqc_hop_num
= HNS_ROCE_CONTEXT_HOP_NUM
;
1366 caps
->mpt_ba_pg_sz
= 0;
1367 caps
->mpt_buf_pg_sz
= 0;
1368 caps
->mpt_hop_num
= HNS_ROCE_CONTEXT_HOP_NUM
;
1369 caps
->pbl_ba_pg_sz
= 0;
1370 caps
->pbl_buf_pg_sz
= 0;
1371 caps
->pbl_hop_num
= HNS_ROCE_PBL_HOP_NUM
;
1372 caps
->mtt_ba_pg_sz
= 0;
1373 caps
->mtt_buf_pg_sz
= 0;
1374 caps
->mtt_hop_num
= HNS_ROCE_MTT_HOP_NUM
;
1375 caps
->cqe_ba_pg_sz
= 0;
1376 caps
->cqe_buf_pg_sz
= 0;
1377 caps
->cqe_hop_num
= HNS_ROCE_CQE_HOP_NUM
;
1378 caps
->srqwqe_ba_pg_sz
= 0;
1379 caps
->srqwqe_buf_pg_sz
= 0;
1380 caps
->srqwqe_hop_num
= HNS_ROCE_SRQWQE_HOP_NUM
;
1381 caps
->idx_ba_pg_sz
= 0;
1382 caps
->idx_buf_pg_sz
= 0;
1383 caps
->idx_hop_num
= HNS_ROCE_IDX_HOP_NUM
;
1384 caps
->eqe_ba_pg_sz
= 0;
1385 caps
->eqe_buf_pg_sz
= 0;
1386 caps
->eqe_hop_num
= HNS_ROCE_EQE_HOP_NUM
;
1387 caps
->tsq_buf_pg_sz
= 0;
1388 caps
->chunk_sz
= HNS_ROCE_V2_TABLE_CHUNK_SIZE
;
1390 caps
->flags
= HNS_ROCE_CAP_FLAG_REREG_MR
|
1391 HNS_ROCE_CAP_FLAG_ROCE_V1_V2
|
1392 HNS_ROCE_CAP_FLAG_RQ_INLINE
|
1393 HNS_ROCE_CAP_FLAG_RECORD_DB
|
1394 HNS_ROCE_CAP_FLAG_SQ_RECORD_DB
;
1396 if (hr_dev
->pci_dev
->revision
== 0x21)
1397 caps
->flags
|= HNS_ROCE_CAP_FLAG_MW
|
1398 HNS_ROCE_CAP_FLAG_FRMR
;
1400 caps
->pkey_table_len
[0] = 1;
1401 caps
->gid_table_len
[0] = HNS_ROCE_V2_GID_INDEX_NUM
;
1402 caps
->ceqe_depth
= HNS_ROCE_V2_COMP_EQE_NUM
;
1403 caps
->aeqe_depth
= HNS_ROCE_V2_ASYNC_EQE_NUM
;
1404 caps
->local_ca_ack_delay
= 0;
1405 caps
->max_mtu
= IB_MTU_4096
;
1407 caps
->max_srqs
= HNS_ROCE_V2_MAX_SRQ
;
1408 caps
->max_srq_wrs
= HNS_ROCE_V2_MAX_SRQ_WR
;
1409 caps
->max_srq_sges
= HNS_ROCE_V2_MAX_SRQ_SGE
;
1411 if (hr_dev
->pci_dev
->revision
== 0x21)
1412 caps
->flags
|= HNS_ROCE_CAP_FLAG_ATOMIC
|
1413 HNS_ROCE_CAP_FLAG_SRQ
;
1415 ret
= hns_roce_v2_set_bt(hr_dev
);
1417 dev_err(hr_dev
->dev
, "Configure bt attribute fail, ret = %d.\n",
1423 static int hns_roce_config_link_table(struct hns_roce_dev
*hr_dev
,
1424 enum hns_roce_link_table_type type
)
1426 struct hns_roce_cmq_desc desc
[2];
1427 struct hns_roce_cfg_llm_a
*req_a
=
1428 (struct hns_roce_cfg_llm_a
*)desc
[0].data
;
1429 struct hns_roce_cfg_llm_b
*req_b
=
1430 (struct hns_roce_cfg_llm_b
*)desc
[1].data
;
1431 struct hns_roce_v2_priv
*priv
= hr_dev
->priv
;
1432 struct hns_roce_link_table
*link_tbl
;
1433 struct hns_roce_link_table_entry
*entry
;
1434 enum hns_roce_opcode_type opcode
;
1439 case TSQ_LINK_TABLE
:
1440 link_tbl
= &priv
->tsq
;
1441 opcode
= HNS_ROCE_OPC_CFG_EXT_LLM
;
1443 case TPQ_LINK_TABLE
:
1444 link_tbl
= &priv
->tpq
;
1445 opcode
= HNS_ROCE_OPC_CFG_TMOUT_LLM
;
1451 page_num
= link_tbl
->npages
;
1452 entry
= link_tbl
->table
.buf
;
1453 memset(req_a
, 0, sizeof(*req_a
));
1454 memset(req_b
, 0, sizeof(*req_b
));
1456 for (i
= 0; i
< 2; i
++) {
1457 hns_roce_cmq_setup_basic_desc(&desc
[i
], opcode
, false);
1460 desc
[i
].flag
|= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT
);
1462 desc
[i
].flag
&= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT
);
1465 req_a
->base_addr_l
= link_tbl
->table
.map
& 0xffffffff;
1466 req_a
->base_addr_h
= (link_tbl
->table
.map
>> 32) &
1468 roce_set_field(req_a
->depth_pgsz_init_en
,
1469 CFG_LLM_QUE_DEPTH_M
,
1470 CFG_LLM_QUE_DEPTH_S
,
1472 roce_set_field(req_a
->depth_pgsz_init_en
,
1476 req_a
->head_ba_l
= entry
[0].blk_ba0
;
1477 req_a
->head_ba_h_nxtptr
= entry
[0].blk_ba1_nxt_ptr
;
1478 roce_set_field(req_a
->head_ptr
,
1480 CFG_LLM_HEAD_PTR_S
, 0);
1482 req_b
->tail_ba_l
= entry
[page_num
- 1].blk_ba0
;
1483 roce_set_field(req_b
->tail_ba_h
,
1484 CFG_LLM_TAIL_BA_H_M
,
1485 CFG_LLM_TAIL_BA_H_S
,
1486 entry
[page_num
- 1].blk_ba1_nxt_ptr
&
1487 HNS_ROCE_LINK_TABLE_BA1_M
);
1488 roce_set_field(req_b
->tail_ptr
,
1491 (entry
[page_num
- 2].blk_ba1_nxt_ptr
&
1492 HNS_ROCE_LINK_TABLE_NXT_PTR_M
) >>
1493 HNS_ROCE_LINK_TABLE_NXT_PTR_S
);
1496 roce_set_field(req_a
->depth_pgsz_init_en
,
1497 CFG_LLM_INIT_EN_M
, CFG_LLM_INIT_EN_S
, 1);
1499 return hns_roce_cmq_send(hr_dev
, desc
, 2);
1502 static int hns_roce_init_link_table(struct hns_roce_dev
*hr_dev
,
1503 enum hns_roce_link_table_type type
)
1505 struct hns_roce_v2_priv
*priv
= hr_dev
->priv
;
1506 struct hns_roce_link_table
*link_tbl
;
1507 struct hns_roce_link_table_entry
*entry
;
1508 struct device
*dev
= hr_dev
->dev
;
1519 case TSQ_LINK_TABLE
:
1520 link_tbl
= &priv
->tsq
;
1521 buf_chk_sz
= 1 << (hr_dev
->caps
.tsq_buf_pg_sz
+ PAGE_SHIFT
);
1522 pg_num_a
= hr_dev
->caps
.num_qps
* 8 / buf_chk_sz
;
1523 pg_num_b
= hr_dev
->caps
.sl_num
* 4 + 2;
1525 case TPQ_LINK_TABLE
:
1526 link_tbl
= &priv
->tpq
;
1527 buf_chk_sz
= 1 << (hr_dev
->caps
.tpq_buf_pg_sz
+ PAGE_SHIFT
);
1528 pg_num_a
= hr_dev
->caps
.num_cqs
* 4 / buf_chk_sz
;
1529 pg_num_b
= 2 * 4 * func_num
+ 2;
1535 pg_num
= max(pg_num_a
, pg_num_b
);
1536 size
= pg_num
* sizeof(struct hns_roce_link_table_entry
);
1538 link_tbl
->table
.buf
= dma_alloc_coherent(dev
, size
,
1539 &link_tbl
->table
.map
,
1541 if (!link_tbl
->table
.buf
)
1544 link_tbl
->pg_list
= kcalloc(pg_num
, sizeof(*link_tbl
->pg_list
),
1546 if (!link_tbl
->pg_list
)
1547 goto err_kcalloc_failed
;
1549 entry
= link_tbl
->table
.buf
;
1550 for (i
= 0; i
< pg_num
; ++i
) {
1551 link_tbl
->pg_list
[i
].buf
= dma_alloc_coherent(dev
, buf_chk_sz
,
1553 if (!link_tbl
->pg_list
[i
].buf
)
1554 goto err_alloc_buf_failed
;
1556 link_tbl
->pg_list
[i
].map
= t
;
1557 memset(link_tbl
->pg_list
[i
].buf
, 0, buf_chk_sz
);
1559 entry
[i
].blk_ba0
= (t
>> 12) & 0xffffffff;
1560 roce_set_field(entry
[i
].blk_ba1_nxt_ptr
,
1561 HNS_ROCE_LINK_TABLE_BA1_M
,
1562 HNS_ROCE_LINK_TABLE_BA1_S
,
1565 if (i
< (pg_num
- 1))
1566 roce_set_field(entry
[i
].blk_ba1_nxt_ptr
,
1567 HNS_ROCE_LINK_TABLE_NXT_PTR_M
,
1568 HNS_ROCE_LINK_TABLE_NXT_PTR_S
,
1571 link_tbl
->npages
= pg_num
;
1572 link_tbl
->pg_sz
= buf_chk_sz
;
1574 return hns_roce_config_link_table(hr_dev
, type
);
1576 err_alloc_buf_failed
:
1577 for (i
-= 1; i
>= 0; i
--)
1578 dma_free_coherent(dev
, buf_chk_sz
,
1579 link_tbl
->pg_list
[i
].buf
,
1580 link_tbl
->pg_list
[i
].map
);
1581 kfree(link_tbl
->pg_list
);
1584 dma_free_coherent(dev
, size
, link_tbl
->table
.buf
,
1585 link_tbl
->table
.map
);
1591 static void hns_roce_free_link_table(struct hns_roce_dev
*hr_dev
,
1592 struct hns_roce_link_table
*link_tbl
)
1594 struct device
*dev
= hr_dev
->dev
;
1598 size
= link_tbl
->npages
* sizeof(struct hns_roce_link_table_entry
);
1600 for (i
= 0; i
< link_tbl
->npages
; ++i
)
1601 if (link_tbl
->pg_list
[i
].buf
)
1602 dma_free_coherent(dev
, link_tbl
->pg_sz
,
1603 link_tbl
->pg_list
[i
].buf
,
1604 link_tbl
->pg_list
[i
].map
);
1605 kfree(link_tbl
->pg_list
);
1607 dma_free_coherent(dev
, size
, link_tbl
->table
.buf
,
1608 link_tbl
->table
.map
);
1611 static int hns_roce_v2_init(struct hns_roce_dev
*hr_dev
)
1613 struct hns_roce_v2_priv
*priv
= hr_dev
->priv
;
1616 /* TSQ includes SQ doorbell and ack doorbell */
1617 ret
= hns_roce_init_link_table(hr_dev
, TSQ_LINK_TABLE
);
1619 dev_err(hr_dev
->dev
, "TSQ init failed, ret = %d.\n", ret
);
1623 ret
= hns_roce_init_link_table(hr_dev
, TPQ_LINK_TABLE
);
1625 dev_err(hr_dev
->dev
, "TPQ init failed, ret = %d.\n", ret
);
1626 goto err_tpq_init_failed
;
1631 err_tpq_init_failed
:
1632 hns_roce_free_link_table(hr_dev
, &priv
->tsq
);
1637 static void hns_roce_v2_exit(struct hns_roce_dev
*hr_dev
)
1639 struct hns_roce_v2_priv
*priv
= hr_dev
->priv
;
1641 hns_roce_free_link_table(hr_dev
, &priv
->tpq
);
1642 hns_roce_free_link_table(hr_dev
, &priv
->tsq
);
1645 static int hns_roce_query_mbox_status(struct hns_roce_dev
*hr_dev
)
1647 struct hns_roce_cmq_desc desc
;
1648 struct hns_roce_mbox_status
*mb_st
=
1649 (struct hns_roce_mbox_status
*)desc
.data
;
1650 enum hns_roce_cmd_return_status status
;
1652 hns_roce_cmq_setup_basic_desc(&desc
, HNS_ROCE_OPC_QUERY_MB_ST
, true);
1654 status
= hns_roce_cmq_send(hr_dev
, &desc
, 1);
1658 return cpu_to_le32(mb_st
->mb_status_hw_run
);
1661 static int hns_roce_v2_cmd_pending(struct hns_roce_dev
*hr_dev
)
1663 u32 status
= hns_roce_query_mbox_status(hr_dev
);
1665 return status
>> HNS_ROCE_HW_RUN_BIT_SHIFT
;
1668 static int hns_roce_v2_cmd_complete(struct hns_roce_dev
*hr_dev
)
1670 u32 status
= hns_roce_query_mbox_status(hr_dev
);
1672 return status
& HNS_ROCE_HW_MB_STATUS_MASK
;
1675 static int hns_roce_mbox_post(struct hns_roce_dev
*hr_dev
, u64 in_param
,
1676 u64 out_param
, u32 in_modifier
, u8 op_modifier
,
1677 u16 op
, u16 token
, int event
)
1679 struct hns_roce_cmq_desc desc
;
1680 struct hns_roce_post_mbox
*mb
= (struct hns_roce_post_mbox
*)desc
.data
;
1682 hns_roce_cmq_setup_basic_desc(&desc
, HNS_ROCE_OPC_POST_MB
, false);
1684 mb
->in_param_l
= cpu_to_le64(in_param
);
1685 mb
->in_param_h
= cpu_to_le64(in_param
) >> 32;
1686 mb
->out_param_l
= cpu_to_le64(out_param
);
1687 mb
->out_param_h
= cpu_to_le64(out_param
) >> 32;
1688 mb
->cmd_tag
= cpu_to_le32(in_modifier
<< 8 | op
);
1689 mb
->token_event_en
= cpu_to_le32(event
<< 16 | token
);
1691 return hns_roce_cmq_send(hr_dev
, &desc
, 1);
1694 static int hns_roce_v2_post_mbox(struct hns_roce_dev
*hr_dev
, u64 in_param
,
1695 u64 out_param
, u32 in_modifier
, u8 op_modifier
,
1696 u16 op
, u16 token
, int event
)
1698 struct device
*dev
= hr_dev
->dev
;
1702 end
= msecs_to_jiffies(HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS
) + jiffies
;
1703 while (hns_roce_v2_cmd_pending(hr_dev
)) {
1704 if (time_after(jiffies
, end
)) {
1705 dev_dbg(dev
, "jiffies=%d end=%d\n", (int)jiffies
,
1712 ret
= hns_roce_mbox_post(hr_dev
, in_param
, out_param
, in_modifier
,
1713 op_modifier
, op
, token
, event
);
1715 dev_err(dev
, "Post mailbox fail(%d)\n", ret
);
1720 static int hns_roce_v2_chk_mbox(struct hns_roce_dev
*hr_dev
,
1721 unsigned long timeout
)
1723 struct device
*dev
= hr_dev
->dev
;
1724 unsigned long end
= 0;
1727 end
= msecs_to_jiffies(timeout
) + jiffies
;
1728 while (hns_roce_v2_cmd_pending(hr_dev
) && time_before(jiffies
, end
))
1731 if (hns_roce_v2_cmd_pending(hr_dev
)) {
1732 dev_err(dev
, "[cmd_poll]hw run cmd TIMEDOUT!\n");
1736 status
= hns_roce_v2_cmd_complete(hr_dev
);
1737 if (status
!= 0x1) {
1738 dev_err(dev
, "mailbox status 0x%x!\n", status
);
1745 static int hns_roce_config_sgid_table(struct hns_roce_dev
*hr_dev
,
1746 int gid_index
, const union ib_gid
*gid
,
1747 enum hns_roce_sgid_type sgid_type
)
1749 struct hns_roce_cmq_desc desc
;
1750 struct hns_roce_cfg_sgid_tb
*sgid_tb
=
1751 (struct hns_roce_cfg_sgid_tb
*)desc
.data
;
1754 hns_roce_cmq_setup_basic_desc(&desc
, HNS_ROCE_OPC_CFG_SGID_TB
, false);
1756 roce_set_field(sgid_tb
->table_idx_rsv
,
1757 CFG_SGID_TB_TABLE_IDX_M
,
1758 CFG_SGID_TB_TABLE_IDX_S
, gid_index
);
1759 roce_set_field(sgid_tb
->vf_sgid_type_rsv
,
1760 CFG_SGID_TB_VF_SGID_TYPE_M
,
1761 CFG_SGID_TB_VF_SGID_TYPE_S
, sgid_type
);
1763 p
= (u32
*)&gid
->raw
[0];
1764 sgid_tb
->vf_sgid_l
= cpu_to_le32(*p
);
1766 p
= (u32
*)&gid
->raw
[4];
1767 sgid_tb
->vf_sgid_ml
= cpu_to_le32(*p
);
1769 p
= (u32
*)&gid
->raw
[8];
1770 sgid_tb
->vf_sgid_mh
= cpu_to_le32(*p
);
1772 p
= (u32
*)&gid
->raw
[0xc];
1773 sgid_tb
->vf_sgid_h
= cpu_to_le32(*p
);
1775 return hns_roce_cmq_send(hr_dev
, &desc
, 1);
1778 static int hns_roce_v2_set_gid(struct hns_roce_dev
*hr_dev
, u8 port
,
1779 int gid_index
, const union ib_gid
*gid
,
1780 const struct ib_gid_attr
*attr
)
1782 enum hns_roce_sgid_type sgid_type
= GID_TYPE_FLAG_ROCE_V1
;
1788 if (attr
->gid_type
== IB_GID_TYPE_ROCE
)
1789 sgid_type
= GID_TYPE_FLAG_ROCE_V1
;
1791 if (attr
->gid_type
== IB_GID_TYPE_ROCE_UDP_ENCAP
) {
1792 if (ipv6_addr_v4mapped((void *)gid
))
1793 sgid_type
= GID_TYPE_FLAG_ROCE_V2_IPV4
;
1795 sgid_type
= GID_TYPE_FLAG_ROCE_V2_IPV6
;
1798 ret
= hns_roce_config_sgid_table(hr_dev
, gid_index
, gid
, sgid_type
);
1800 dev_err(hr_dev
->dev
, "Configure sgid table failed(%d)!\n", ret
);
1805 static int hns_roce_v2_set_mac(struct hns_roce_dev
*hr_dev
, u8 phy_port
,
1808 struct hns_roce_cmq_desc desc
;
1809 struct hns_roce_cfg_smac_tb
*smac_tb
=
1810 (struct hns_roce_cfg_smac_tb
*)desc
.data
;
1814 hns_roce_cmq_setup_basic_desc(&desc
, HNS_ROCE_OPC_CFG_SMAC_TB
, false);
1816 reg_smac_l
= *(u32
*)(&addr
[0]);
1817 reg_smac_h
= *(u16
*)(&addr
[4]);
1819 memset(smac_tb
, 0, sizeof(*smac_tb
));
1820 roce_set_field(smac_tb
->tb_idx_rsv
,
1822 CFG_SMAC_TB_IDX_S
, phy_port
);
1823 roce_set_field(smac_tb
->vf_smac_h_rsv
,
1824 CFG_SMAC_TB_VF_SMAC_H_M
,
1825 CFG_SMAC_TB_VF_SMAC_H_S
, reg_smac_h
);
1826 smac_tb
->vf_smac_l
= reg_smac_l
;
1828 return hns_roce_cmq_send(hr_dev
, &desc
, 1);
1831 static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry
*mpt_entry
,
1832 struct hns_roce_mr
*mr
)
1834 struct scatterlist
*sg
;
1841 mpt_entry
->pbl_size
= cpu_to_le32(mr
->pbl_size
);
1842 mpt_entry
->pbl_ba_l
= cpu_to_le32(lower_32_bits(mr
->pbl_ba
>> 3));
1843 roce_set_field(mpt_entry
->byte_48_mode_ba
,
1844 V2_MPT_BYTE_48_PBL_BA_H_M
, V2_MPT_BYTE_48_PBL_BA_H_S
,
1845 upper_32_bits(mr
->pbl_ba
>> 3));
1847 pages
= (u64
*)__get_free_page(GFP_KERNEL
);
1852 for_each_sg(mr
->umem
->sg_head
.sgl
, sg
, mr
->umem
->nmap
, entry
) {
1853 len
= sg_dma_len(sg
) >> PAGE_SHIFT
;
1854 for (j
= 0; j
< len
; ++j
) {
1855 page_addr
= sg_dma_address(sg
) +
1856 (j
<< mr
->umem
->page_shift
);
1857 pages
[i
] = page_addr
>> 6;
1858 /* Record the first 2 entry directly to MTPT table */
1859 if (i
>= HNS_ROCE_V2_MAX_INNER_MTPT_NUM
- 1)
1865 mpt_entry
->pa0_l
= cpu_to_le32(lower_32_bits(pages
[0]));
1866 roce_set_field(mpt_entry
->byte_56_pa0_h
, V2_MPT_BYTE_56_PA0_H_M
,
1867 V2_MPT_BYTE_56_PA0_H_S
, upper_32_bits(pages
[0]));
1869 mpt_entry
->pa1_l
= cpu_to_le32(lower_32_bits(pages
[1]));
1870 roce_set_field(mpt_entry
->byte_64_buf_pa1
, V2_MPT_BYTE_64_PA1_H_M
,
1871 V2_MPT_BYTE_64_PA1_H_S
, upper_32_bits(pages
[1]));
1872 roce_set_field(mpt_entry
->byte_64_buf_pa1
,
1873 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M
,
1874 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S
,
1875 mr
->pbl_buf_pg_sz
+ PG_SHIFT_OFFSET
);
1877 free_page((unsigned long)pages
);
1882 static int hns_roce_v2_write_mtpt(void *mb_buf
, struct hns_roce_mr
*mr
,
1883 unsigned long mtpt_idx
)
1885 struct hns_roce_v2_mpt_entry
*mpt_entry
;
1889 memset(mpt_entry
, 0, sizeof(*mpt_entry
));
1891 roce_set_field(mpt_entry
->byte_4_pd_hop_st
, V2_MPT_BYTE_4_MPT_ST_M
,
1892 V2_MPT_BYTE_4_MPT_ST_S
, V2_MPT_ST_VALID
);
1893 roce_set_field(mpt_entry
->byte_4_pd_hop_st
, V2_MPT_BYTE_4_PBL_HOP_NUM_M
,
1894 V2_MPT_BYTE_4_PBL_HOP_NUM_S
, mr
->pbl_hop_num
==
1895 HNS_ROCE_HOP_NUM_0
? 0 : mr
->pbl_hop_num
);
1896 roce_set_field(mpt_entry
->byte_4_pd_hop_st
,
1897 V2_MPT_BYTE_4_PBL_BA_PG_SZ_M
,
1898 V2_MPT_BYTE_4_PBL_BA_PG_SZ_S
,
1899 mr
->pbl_ba_pg_sz
+ PG_SHIFT_OFFSET
);
1900 roce_set_field(mpt_entry
->byte_4_pd_hop_st
, V2_MPT_BYTE_4_PD_M
,
1901 V2_MPT_BYTE_4_PD_S
, mr
->pd
);
1903 roce_set_bit(mpt_entry
->byte_8_mw_cnt_en
, V2_MPT_BYTE_8_RA_EN_S
, 0);
1904 roce_set_bit(mpt_entry
->byte_8_mw_cnt_en
, V2_MPT_BYTE_8_R_INV_EN_S
, 1);
1905 roce_set_bit(mpt_entry
->byte_8_mw_cnt_en
, V2_MPT_BYTE_8_L_INV_EN_S
, 1);
1906 roce_set_bit(mpt_entry
->byte_8_mw_cnt_en
, V2_MPT_BYTE_8_BIND_EN_S
,
1907 (mr
->access
& IB_ACCESS_MW_BIND
? 1 : 0));
1908 roce_set_bit(mpt_entry
->byte_8_mw_cnt_en
, V2_MPT_BYTE_8_ATOMIC_EN_S
,
1909 mr
->access
& IB_ACCESS_REMOTE_ATOMIC
? 1 : 0);
1910 roce_set_bit(mpt_entry
->byte_8_mw_cnt_en
, V2_MPT_BYTE_8_RR_EN_S
,
1911 (mr
->access
& IB_ACCESS_REMOTE_READ
? 1 : 0));
1912 roce_set_bit(mpt_entry
->byte_8_mw_cnt_en
, V2_MPT_BYTE_8_RW_EN_S
,
1913 (mr
->access
& IB_ACCESS_REMOTE_WRITE
? 1 : 0));
1914 roce_set_bit(mpt_entry
->byte_8_mw_cnt_en
, V2_MPT_BYTE_8_LW_EN_S
,
1915 (mr
->access
& IB_ACCESS_LOCAL_WRITE
? 1 : 0));
1917 roce_set_bit(mpt_entry
->byte_12_mw_pa
, V2_MPT_BYTE_12_PA_S
,
1918 mr
->type
== MR_TYPE_MR
? 0 : 1);
1919 roce_set_bit(mpt_entry
->byte_12_mw_pa
, V2_MPT_BYTE_12_INNER_PA_VLD_S
,
1922 mpt_entry
->len_l
= cpu_to_le32(lower_32_bits(mr
->size
));
1923 mpt_entry
->len_h
= cpu_to_le32(upper_32_bits(mr
->size
));
1924 mpt_entry
->lkey
= cpu_to_le32(mr
->key
);
1925 mpt_entry
->va_l
= cpu_to_le32(lower_32_bits(mr
->iova
));
1926 mpt_entry
->va_h
= cpu_to_le32(upper_32_bits(mr
->iova
));
1928 if (mr
->type
== MR_TYPE_DMA
)
1931 ret
= set_mtpt_pbl(mpt_entry
, mr
);
1936 static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev
*hr_dev
,
1937 struct hns_roce_mr
*mr
, int flags
,
1938 u32 pdn
, int mr_access_flags
, u64 iova
,
1939 u64 size
, void *mb_buf
)
1941 struct hns_roce_v2_mpt_entry
*mpt_entry
= mb_buf
;
1944 if (flags
& IB_MR_REREG_PD
) {
1945 roce_set_field(mpt_entry
->byte_4_pd_hop_st
, V2_MPT_BYTE_4_PD_M
,
1946 V2_MPT_BYTE_4_PD_S
, pdn
);
1950 if (flags
& IB_MR_REREG_ACCESS
) {
1951 roce_set_bit(mpt_entry
->byte_8_mw_cnt_en
,
1952 V2_MPT_BYTE_8_BIND_EN_S
,
1953 (mr_access_flags
& IB_ACCESS_MW_BIND
? 1 : 0));
1954 roce_set_bit(mpt_entry
->byte_8_mw_cnt_en
,
1955 V2_MPT_BYTE_8_ATOMIC_EN_S
,
1956 mr_access_flags
& IB_ACCESS_REMOTE_ATOMIC
? 1 : 0);
1957 roce_set_bit(mpt_entry
->byte_8_mw_cnt_en
, V2_MPT_BYTE_8_RR_EN_S
,
1958 mr_access_flags
& IB_ACCESS_REMOTE_READ
? 1 : 0);
1959 roce_set_bit(mpt_entry
->byte_8_mw_cnt_en
, V2_MPT_BYTE_8_RW_EN_S
,
1960 mr_access_flags
& IB_ACCESS_REMOTE_WRITE
? 1 : 0);
1961 roce_set_bit(mpt_entry
->byte_8_mw_cnt_en
, V2_MPT_BYTE_8_LW_EN_S
,
1962 mr_access_flags
& IB_ACCESS_LOCAL_WRITE
? 1 : 0);
1965 if (flags
& IB_MR_REREG_TRANS
) {
1966 mpt_entry
->va_l
= cpu_to_le32(lower_32_bits(iova
));
1967 mpt_entry
->va_h
= cpu_to_le32(upper_32_bits(iova
));
1968 mpt_entry
->len_l
= cpu_to_le32(lower_32_bits(size
));
1969 mpt_entry
->len_h
= cpu_to_le32(upper_32_bits(size
));
1974 ret
= set_mtpt_pbl(mpt_entry
, mr
);
1980 static int hns_roce_v2_frmr_write_mtpt(void *mb_buf
, struct hns_roce_mr
*mr
)
1982 struct hns_roce_v2_mpt_entry
*mpt_entry
;
1985 memset(mpt_entry
, 0, sizeof(*mpt_entry
));
1987 roce_set_field(mpt_entry
->byte_4_pd_hop_st
, V2_MPT_BYTE_4_MPT_ST_M
,
1988 V2_MPT_BYTE_4_MPT_ST_S
, V2_MPT_ST_FREE
);
1989 roce_set_field(mpt_entry
->byte_4_pd_hop_st
, V2_MPT_BYTE_4_PBL_HOP_NUM_M
,
1990 V2_MPT_BYTE_4_PBL_HOP_NUM_S
, 1);
1991 roce_set_field(mpt_entry
->byte_4_pd_hop_st
,
1992 V2_MPT_BYTE_4_PBL_BA_PG_SZ_M
,
1993 V2_MPT_BYTE_4_PBL_BA_PG_SZ_S
,
1994 mr
->pbl_ba_pg_sz
+ PG_SHIFT_OFFSET
);
1995 roce_set_field(mpt_entry
->byte_4_pd_hop_st
, V2_MPT_BYTE_4_PD_M
,
1996 V2_MPT_BYTE_4_PD_S
, mr
->pd
);
1998 roce_set_bit(mpt_entry
->byte_8_mw_cnt_en
, V2_MPT_BYTE_8_RA_EN_S
, 1);
1999 roce_set_bit(mpt_entry
->byte_8_mw_cnt_en
, V2_MPT_BYTE_8_R_INV_EN_S
, 1);
2000 roce_set_bit(mpt_entry
->byte_8_mw_cnt_en
, V2_MPT_BYTE_8_L_INV_EN_S
, 1);
2002 roce_set_bit(mpt_entry
->byte_12_mw_pa
, V2_MPT_BYTE_12_FRE_S
, 1);
2003 roce_set_bit(mpt_entry
->byte_12_mw_pa
, V2_MPT_BYTE_12_PA_S
, 0);
2004 roce_set_bit(mpt_entry
->byte_12_mw_pa
, V2_MPT_BYTE_12_MR_MW_S
, 0);
2005 roce_set_bit(mpt_entry
->byte_12_mw_pa
, V2_MPT_BYTE_12_BPD_S
, 1);
2007 mpt_entry
->pbl_size
= cpu_to_le32(mr
->pbl_size
);
2009 mpt_entry
->pbl_ba_l
= cpu_to_le32(lower_32_bits(mr
->pbl_ba
>> 3));
2010 roce_set_field(mpt_entry
->byte_48_mode_ba
, V2_MPT_BYTE_48_PBL_BA_H_M
,
2011 V2_MPT_BYTE_48_PBL_BA_H_S
,
2012 upper_32_bits(mr
->pbl_ba
>> 3));
2014 roce_set_field(mpt_entry
->byte_64_buf_pa1
,
2015 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M
,
2016 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S
,
2017 mr
->pbl_buf_pg_sz
+ PG_SHIFT_OFFSET
);
2022 static int hns_roce_v2_mw_write_mtpt(void *mb_buf
, struct hns_roce_mw
*mw
)
2024 struct hns_roce_v2_mpt_entry
*mpt_entry
;
2027 memset(mpt_entry
, 0, sizeof(*mpt_entry
));
2029 roce_set_field(mpt_entry
->byte_4_pd_hop_st
, V2_MPT_BYTE_4_MPT_ST_M
,
2030 V2_MPT_BYTE_4_MPT_ST_S
, V2_MPT_ST_FREE
);
2031 roce_set_field(mpt_entry
->byte_4_pd_hop_st
, V2_MPT_BYTE_4_PD_M
,
2032 V2_MPT_BYTE_4_PD_S
, mw
->pdn
);
2033 roce_set_field(mpt_entry
->byte_4_pd_hop_st
,
2034 V2_MPT_BYTE_4_PBL_HOP_NUM_M
,
2035 V2_MPT_BYTE_4_PBL_HOP_NUM_S
,
2036 mw
->pbl_hop_num
== HNS_ROCE_HOP_NUM_0
?
2037 0 : mw
->pbl_hop_num
);
2038 roce_set_field(mpt_entry
->byte_4_pd_hop_st
,
2039 V2_MPT_BYTE_4_PBL_BA_PG_SZ_M
,
2040 V2_MPT_BYTE_4_PBL_BA_PG_SZ_S
,
2041 mw
->pbl_ba_pg_sz
+ PG_SHIFT_OFFSET
);
2043 roce_set_bit(mpt_entry
->byte_8_mw_cnt_en
, V2_MPT_BYTE_8_R_INV_EN_S
, 1);
2044 roce_set_bit(mpt_entry
->byte_8_mw_cnt_en
, V2_MPT_BYTE_8_L_INV_EN_S
, 1);
2046 roce_set_bit(mpt_entry
->byte_12_mw_pa
, V2_MPT_BYTE_12_PA_S
, 0);
2047 roce_set_bit(mpt_entry
->byte_12_mw_pa
, V2_MPT_BYTE_12_MR_MW_S
, 1);
2048 roce_set_bit(mpt_entry
->byte_12_mw_pa
, V2_MPT_BYTE_12_BPD_S
, 1);
2049 roce_set_bit(mpt_entry
->byte_12_mw_pa
, V2_MPT_BYTE_12_BQP_S
,
2050 mw
->ibmw
.type
== IB_MW_TYPE_1
? 0 : 1);
2052 roce_set_field(mpt_entry
->byte_64_buf_pa1
,
2053 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M
,
2054 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S
,
2055 mw
->pbl_buf_pg_sz
+ PG_SHIFT_OFFSET
);
2057 mpt_entry
->lkey
= cpu_to_le32(mw
->rkey
);
2062 static void *get_cqe_v2(struct hns_roce_cq
*hr_cq
, int n
)
2064 return hns_roce_buf_offset(&hr_cq
->hr_buf
.hr_buf
,
2065 n
* HNS_ROCE_V2_CQE_ENTRY_SIZE
);
2068 static void *get_sw_cqe_v2(struct hns_roce_cq
*hr_cq
, int n
)
2070 struct hns_roce_v2_cqe
*cqe
= get_cqe_v2(hr_cq
, n
& hr_cq
->ib_cq
.cqe
);
2072 /* Get cqe when Owner bit is Conversely with the MSB of cons_idx */
2073 return (roce_get_bit(cqe
->byte_4
, V2_CQE_BYTE_4_OWNER_S
) ^
2074 !!(n
& (hr_cq
->ib_cq
.cqe
+ 1))) ? cqe
: NULL
;
2077 static struct hns_roce_v2_cqe
*next_cqe_sw_v2(struct hns_roce_cq
*hr_cq
)
2079 return get_sw_cqe_v2(hr_cq
, hr_cq
->cons_index
);
2082 static void *get_srq_wqe(struct hns_roce_srq
*srq
, int n
)
2084 return hns_roce_buf_offset(&srq
->buf
, n
<< srq
->wqe_shift
);
2087 static void hns_roce_free_srq_wqe(struct hns_roce_srq
*srq
, int wqe_index
)
2092 /* always called with interrupts disabled. */
2093 spin_lock(&srq
->lock
);
/* Mark the WQE slot free again in the SRQ index-queue bitmap:
 * the bitmap is an array of u64 words, so locate the word and the
 * bit within it from the wqe_index.
 */
2095 bitmap_num
= wqe_index
/ (sizeof(u64
) * 8);
2096 bit_num
= wqe_index
% (sizeof(u64
) * 8);
/* Setting the bit returns the slot to the free pool. */
2097 srq
->idx_que
.bitmap
[bitmap_num
] |= (1ULL << bit_num
);
2100 spin_unlock(&srq
->lock
);
2103 static void hns_roce_v2_cq_set_ci(struct hns_roce_cq
*hr_cq
, u32 cons_index
)
2105 *hr_cq
->set_ci_db
= cons_index
& 0xffffff;
2108 static void __hns_roce_v2_cq_clean(struct hns_roce_cq
*hr_cq
, u32 qpn
,
2109 struct hns_roce_srq
*srq
)
2111 struct hns_roce_v2_cqe
*cqe
, *dest
;
2117 for (prod_index
= hr_cq
->cons_index
; get_sw_cqe_v2(hr_cq
, prod_index
);
2119 if (prod_index
== hr_cq
->cons_index
+ hr_cq
->ib_cq
.cqe
)
2124 * Now backwards through the CQ, removing CQ entries
2125 * that match our QP by overwriting them with next entries.
2127 while ((int) --prod_index
- (int) hr_cq
->cons_index
>= 0) {
2128 cqe
= get_cqe_v2(hr_cq
, prod_index
& hr_cq
->ib_cq
.cqe
);
2129 if ((roce_get_field(cqe
->byte_16
, V2_CQE_BYTE_16_LCL_QPN_M
,
2130 V2_CQE_BYTE_16_LCL_QPN_S
) &
2131 HNS_ROCE_V2_CQE_QPN_MASK
) == qpn
) {
2133 roce_get_bit(cqe
->byte_4
, V2_CQE_BYTE_4_S_R_S
)) {
2134 wqe_index
= roce_get_field(cqe
->byte_4
,
2135 V2_CQE_BYTE_4_WQE_INDX_M
,
2136 V2_CQE_BYTE_4_WQE_INDX_S
);
2137 hns_roce_free_srq_wqe(srq
, wqe_index
);
2140 } else if (nfreed
) {
2141 dest
= get_cqe_v2(hr_cq
, (prod_index
+ nfreed
) &
2143 owner_bit
= roce_get_bit(dest
->byte_4
,
2144 V2_CQE_BYTE_4_OWNER_S
);
2145 memcpy(dest
, cqe
, sizeof(*cqe
));
2146 roce_set_bit(dest
->byte_4
, V2_CQE_BYTE_4_OWNER_S
,
2152 hr_cq
->cons_index
+= nfreed
;
2154 * Make sure update of buffer contents is done before
2155 * updating consumer index.
2158 hns_roce_v2_cq_set_ci(hr_cq
, hr_cq
->cons_index
);
2162 static void hns_roce_v2_cq_clean(struct hns_roce_cq
*hr_cq
, u32 qpn
,
2163 struct hns_roce_srq
*srq
)
2165 spin_lock_irq(&hr_cq
->lock
);
2166 __hns_roce_v2_cq_clean(hr_cq
, qpn
, srq
);
2167 spin_unlock_irq(&hr_cq
->lock
);
2170 static void hns_roce_v2_write_cqc(struct hns_roce_dev
*hr_dev
,
2171 struct hns_roce_cq
*hr_cq
, void *mb_buf
,
2172 u64
*mtts
, dma_addr_t dma_handle
, int nent
,
2175 struct hns_roce_v2_cq_context
*cq_context
;
2177 cq_context
= mb_buf
;
2178 memset(cq_context
, 0, sizeof(*cq_context
));
2180 roce_set_field(cq_context
->byte_4_pg_ceqn
, V2_CQC_BYTE_4_CQ_ST_M
,
2181 V2_CQC_BYTE_4_CQ_ST_S
, V2_CQ_STATE_VALID
);
2182 roce_set_field(cq_context
->byte_4_pg_ceqn
, V2_CQC_BYTE_4_ARM_ST_M
,
2183 V2_CQC_BYTE_4_ARM_ST_S
, REG_NXT_CEQE
);
2184 roce_set_field(cq_context
->byte_4_pg_ceqn
, V2_CQC_BYTE_4_SHIFT_M
,
2185 V2_CQC_BYTE_4_SHIFT_S
, ilog2((unsigned int)nent
));
2186 roce_set_field(cq_context
->byte_4_pg_ceqn
, V2_CQC_BYTE_4_CEQN_M
,
2187 V2_CQC_BYTE_4_CEQN_S
, vector
);
2188 cq_context
->byte_4_pg_ceqn
= cpu_to_le32(cq_context
->byte_4_pg_ceqn
);
2190 roce_set_field(cq_context
->byte_8_cqn
, V2_CQC_BYTE_8_CQN_M
,
2191 V2_CQC_BYTE_8_CQN_S
, hr_cq
->cqn
);
2193 cq_context
->cqe_cur_blk_addr
= (u32
)(mtts
[0] >> PAGE_ADDR_SHIFT
);
2194 cq_context
->cqe_cur_blk_addr
=
2195 cpu_to_le32(cq_context
->cqe_cur_blk_addr
);
2197 roce_set_field(cq_context
->byte_16_hop_addr
,
2198 V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M
,
2199 V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S
,
2200 cpu_to_le32((mtts
[0]) >> (32 + PAGE_ADDR_SHIFT
)));
2201 roce_set_field(cq_context
->byte_16_hop_addr
,
2202 V2_CQC_BYTE_16_CQE_HOP_NUM_M
,
2203 V2_CQC_BYTE_16_CQE_HOP_NUM_S
, hr_dev
->caps
.cqe_hop_num
==
2204 HNS_ROCE_HOP_NUM_0
? 0 : hr_dev
->caps
.cqe_hop_num
);
2206 cq_context
->cqe_nxt_blk_addr
= (u32
)(mtts
[1] >> PAGE_ADDR_SHIFT
);
2207 roce_set_field(cq_context
->byte_24_pgsz_addr
,
2208 V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M
,
2209 V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S
,
2210 cpu_to_le32((mtts
[1]) >> (32 + PAGE_ADDR_SHIFT
)));
2211 roce_set_field(cq_context
->byte_24_pgsz_addr
,
2212 V2_CQC_BYTE_24_CQE_BA_PG_SZ_M
,
2213 V2_CQC_BYTE_24_CQE_BA_PG_SZ_S
,
2214 hr_dev
->caps
.cqe_ba_pg_sz
+ PG_SHIFT_OFFSET
);
2215 roce_set_field(cq_context
->byte_24_pgsz_addr
,
2216 V2_CQC_BYTE_24_CQE_BUF_PG_SZ_M
,
2217 V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S
,
2218 hr_dev
->caps
.cqe_buf_pg_sz
+ PG_SHIFT_OFFSET
);
2220 cq_context
->cqe_ba
= (u32
)(dma_handle
>> 3);
2222 roce_set_field(cq_context
->byte_40_cqe_ba
, V2_CQC_BYTE_40_CQE_BA_M
,
2223 V2_CQC_BYTE_40_CQE_BA_S
, (dma_handle
>> (32 + 3)));
2226 roce_set_bit(cq_context
->byte_44_db_record
,
2227 V2_CQC_BYTE_44_DB_RECORD_EN_S
, 1);
2229 roce_set_field(cq_context
->byte_44_db_record
,
2230 V2_CQC_BYTE_44_DB_RECORD_ADDR_M
,
2231 V2_CQC_BYTE_44_DB_RECORD_ADDR_S
,
2232 ((u32
)hr_cq
->db
.dma
) >> 1);
2233 cq_context
->db_record_addr
= hr_cq
->db
.dma
>> 32;
2235 roce_set_field(cq_context
->byte_56_cqe_period_maxcnt
,
2236 V2_CQC_BYTE_56_CQ_MAX_CNT_M
,
2237 V2_CQC_BYTE_56_CQ_MAX_CNT_S
,
2238 HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM
);
2239 roce_set_field(cq_context
->byte_56_cqe_period_maxcnt
,
2240 V2_CQC_BYTE_56_CQ_PERIOD_M
,
2241 V2_CQC_BYTE_56_CQ_PERIOD_S
,
2242 HNS_ROCE_V2_CQ_DEFAULT_INTERVAL
);
2245 static int hns_roce_v2_req_notify_cq(struct ib_cq
*ibcq
,
2246 enum ib_cq_notify_flags flags
)
2248 struct hns_roce_cq
*hr_cq
= to_hr_cq(ibcq
);
2249 u32 notification_flag
;
2255 notification_flag
= (flags
& IB_CQ_SOLICITED_MASK
) == IB_CQ_SOLICITED
?
2256 V2_CQ_DB_REQ_NOT
: V2_CQ_DB_REQ_NOT_SOL
;
2258 * flags = 0; Notification Flag = 1, next
2259 * flags = 1; Notification Flag = 0, solocited
2261 roce_set_field(doorbell
[0], V2_CQ_DB_BYTE_4_TAG_M
, V2_DB_BYTE_4_TAG_S
,
2263 roce_set_field(doorbell
[0], V2_CQ_DB_BYTE_4_CMD_M
, V2_DB_BYTE_4_CMD_S
,
2264 HNS_ROCE_V2_CQ_DB_NTR
);
2265 roce_set_field(doorbell
[1], V2_CQ_DB_PARAMETER_CONS_IDX_M
,
2266 V2_CQ_DB_PARAMETER_CONS_IDX_S
,
2267 hr_cq
->cons_index
& ((hr_cq
->cq_depth
<< 1) - 1));
2268 roce_set_field(doorbell
[1], V2_CQ_DB_PARAMETER_CMD_SN_M
,
2269 V2_CQ_DB_PARAMETER_CMD_SN_S
, hr_cq
->arm_sn
& 0x3);
2270 roce_set_bit(doorbell
[1], V2_CQ_DB_PARAMETER_NOTIFY_S
,
2273 hns_roce_write64_k(doorbell
, hr_cq
->cq_db_l
);
2278 static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe
*cqe
,
2279 struct hns_roce_qp
**cur_qp
,
2282 struct hns_roce_rinl_sge
*sge_list
;
2283 u32 wr_num
, wr_cnt
, sge_num
;
2284 u32 sge_cnt
, data_len
, size
;
2287 wr_num
= roce_get_field(cqe
->byte_4
, V2_CQE_BYTE_4_WQE_INDX_M
,
2288 V2_CQE_BYTE_4_WQE_INDX_S
) & 0xffff;
2289 wr_cnt
= wr_num
& ((*cur_qp
)->rq
.wqe_cnt
- 1);
2291 sge_list
= (*cur_qp
)->rq_inl_buf
.wqe_list
[wr_cnt
].sg_list
;
2292 sge_num
= (*cur_qp
)->rq_inl_buf
.wqe_list
[wr_cnt
].sge_cnt
;
2293 wqe_buf
= get_recv_wqe(*cur_qp
, wr_cnt
);
2294 data_len
= wc
->byte_len
;
2296 for (sge_cnt
= 0; (sge_cnt
< sge_num
) && (data_len
); sge_cnt
++) {
2297 size
= min(sge_list
[sge_cnt
].len
, data_len
);
2298 memcpy((void *)sge_list
[sge_cnt
].addr
, wqe_buf
, size
);
2305 wc
->status
= IB_WC_LOC_LEN_ERR
;
2312 static int hns_roce_v2_poll_one(struct hns_roce_cq
*hr_cq
,
2313 struct hns_roce_qp
**cur_qp
, struct ib_wc
*wc
)
2315 struct hns_roce_srq
*srq
= NULL
;
2316 struct hns_roce_dev
*hr_dev
;
2317 struct hns_roce_v2_cqe
*cqe
;
2318 struct hns_roce_qp
*hr_qp
;
2319 struct hns_roce_wq
*wq
;
2320 struct ib_qp_attr attr
;
2329 /* Find cqe according to consumer index */
2330 cqe
= next_cqe_sw_v2(hr_cq
);
2334 ++hr_cq
->cons_index
;
2335 /* Memory barrier */
2339 is_send
= !roce_get_bit(cqe
->byte_4
, V2_CQE_BYTE_4_S_R_S
);
2341 qpn
= roce_get_field(cqe
->byte_16
, V2_CQE_BYTE_16_LCL_QPN_M
,
2342 V2_CQE_BYTE_16_LCL_QPN_S
);
2344 if (!*cur_qp
|| (qpn
& HNS_ROCE_V2_CQE_QPN_MASK
) != (*cur_qp
)->qpn
) {
2345 hr_dev
= to_hr_dev(hr_cq
->ib_cq
.device
);
2346 hr_qp
= __hns_roce_qp_lookup(hr_dev
, qpn
);
2347 if (unlikely(!hr_qp
)) {
2348 dev_err(hr_dev
->dev
, "CQ %06lx with entry for unknown QPN %06x\n",
2349 hr_cq
->cqn
, (qpn
& HNS_ROCE_V2_CQE_QPN_MASK
));
2355 wc
->qp
= &(*cur_qp
)->ibqp
;
2359 wq
= &(*cur_qp
)->sq
;
2360 if ((*cur_qp
)->sq_signal_bits
) {
2362 * If sg_signal_bit is 1,
2363 * firstly tail pointer updated to wqe
2364 * which current cqe correspond to
2366 wqe_ctr
= (u16
)roce_get_field(cqe
->byte_4
,
2367 V2_CQE_BYTE_4_WQE_INDX_M
,
2368 V2_CQE_BYTE_4_WQE_INDX_S
);
2369 wq
->tail
+= (wqe_ctr
- (u16
)wq
->tail
) &
2373 wc
->wr_id
= wq
->wrid
[wq
->tail
& (wq
->wqe_cnt
- 1)];
2375 } else if ((*cur_qp
)->ibqp
.srq
) {
2376 srq
= to_hr_srq((*cur_qp
)->ibqp
.srq
);
2377 wqe_ctr
= le16_to_cpu(roce_get_field(cqe
->byte_4
,
2378 V2_CQE_BYTE_4_WQE_INDX_M
,
2379 V2_CQE_BYTE_4_WQE_INDX_S
));
2380 wc
->wr_id
= srq
->wrid
[wqe_ctr
];
2381 hns_roce_free_srq_wqe(srq
, wqe_ctr
);
2383 /* Update tail pointer, record wr_id */
2384 wq
= &(*cur_qp
)->rq
;
2385 wc
->wr_id
= wq
->wrid
[wq
->tail
& (wq
->wqe_cnt
- 1)];
2389 status
= roce_get_field(cqe
->byte_4
, V2_CQE_BYTE_4_STATUS_M
,
2390 V2_CQE_BYTE_4_STATUS_S
);
2391 switch (status
& HNS_ROCE_V2_CQE_STATUS_MASK
) {
2392 case HNS_ROCE_CQE_V2_SUCCESS
:
2393 wc
->status
= IB_WC_SUCCESS
;
2395 case HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR
:
2396 wc
->status
= IB_WC_LOC_LEN_ERR
;
2398 case HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR
:
2399 wc
->status
= IB_WC_LOC_QP_OP_ERR
;
2401 case HNS_ROCE_CQE_V2_LOCAL_PROT_ERR
:
2402 wc
->status
= IB_WC_LOC_PROT_ERR
;
2404 case HNS_ROCE_CQE_V2_WR_FLUSH_ERR
:
2405 wc
->status
= IB_WC_WR_FLUSH_ERR
;
2407 case HNS_ROCE_CQE_V2_MW_BIND_ERR
:
2408 wc
->status
= IB_WC_MW_BIND_ERR
;
2410 case HNS_ROCE_CQE_V2_BAD_RESP_ERR
:
2411 wc
->status
= IB_WC_BAD_RESP_ERR
;
2413 case HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR
:
2414 wc
->status
= IB_WC_LOC_ACCESS_ERR
;
2416 case HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR
:
2417 wc
->status
= IB_WC_REM_INV_REQ_ERR
;
2419 case HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR
:
2420 wc
->status
= IB_WC_REM_ACCESS_ERR
;
2422 case HNS_ROCE_CQE_V2_REMOTE_OP_ERR
:
2423 wc
->status
= IB_WC_REM_OP_ERR
;
2425 case HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR
:
2426 wc
->status
= IB_WC_RETRY_EXC_ERR
;
2428 case HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR
:
2429 wc
->status
= IB_WC_RNR_RETRY_EXC_ERR
;
2431 case HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR
:
2432 wc
->status
= IB_WC_REM_ABORT_ERR
;
2435 wc
->status
= IB_WC_GENERAL_ERR
;
2439 /* flush cqe if wc status is error, excluding flush error */
2440 if ((wc
->status
!= IB_WC_SUCCESS
) &&
2441 (wc
->status
!= IB_WC_WR_FLUSH_ERR
)) {
2442 attr_mask
= IB_QP_STATE
;
2443 attr
.qp_state
= IB_QPS_ERR
;
2444 return hns_roce_v2_modify_qp(&(*cur_qp
)->ibqp
,
2446 (*cur_qp
)->state
, IB_QPS_ERR
);
2449 if (wc
->status
== IB_WC_WR_FLUSH_ERR
)
2454 /* SQ corresponding to CQE */
2455 switch (roce_get_field(cqe
->byte_4
, V2_CQE_BYTE_4_OPCODE_M
,
2456 V2_CQE_BYTE_4_OPCODE_S
) & 0x1f) {
2457 case HNS_ROCE_SQ_OPCODE_SEND
:
2458 wc
->opcode
= IB_WC_SEND
;
2460 case HNS_ROCE_SQ_OPCODE_SEND_WITH_INV
:
2461 wc
->opcode
= IB_WC_SEND
;
2463 case HNS_ROCE_SQ_OPCODE_SEND_WITH_IMM
:
2464 wc
->opcode
= IB_WC_SEND
;
2465 wc
->wc_flags
|= IB_WC_WITH_IMM
;
2467 case HNS_ROCE_SQ_OPCODE_RDMA_READ
:
2468 wc
->opcode
= IB_WC_RDMA_READ
;
2469 wc
->byte_len
= le32_to_cpu(cqe
->byte_cnt
);
2471 case HNS_ROCE_SQ_OPCODE_RDMA_WRITE
:
2472 wc
->opcode
= IB_WC_RDMA_WRITE
;
2474 case HNS_ROCE_SQ_OPCODE_RDMA_WRITE_WITH_IMM
:
2475 wc
->opcode
= IB_WC_RDMA_WRITE
;
2476 wc
->wc_flags
|= IB_WC_WITH_IMM
;
2478 case HNS_ROCE_SQ_OPCODE_LOCAL_INV
:
2479 wc
->opcode
= IB_WC_LOCAL_INV
;
2480 wc
->wc_flags
|= IB_WC_WITH_INVALIDATE
;
2482 case HNS_ROCE_SQ_OPCODE_ATOMIC_COMP_AND_SWAP
:
2483 wc
->opcode
= IB_WC_COMP_SWAP
;
2486 case HNS_ROCE_SQ_OPCODE_ATOMIC_FETCH_AND_ADD
:
2487 wc
->opcode
= IB_WC_FETCH_ADD
;
2490 case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_COMP_AND_SWAP
:
2491 wc
->opcode
= IB_WC_MASKED_COMP_SWAP
;
2494 case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_FETCH_AND_ADD
:
2495 wc
->opcode
= IB_WC_MASKED_FETCH_ADD
;
2498 case HNS_ROCE_SQ_OPCODE_FAST_REG_WR
:
2499 wc
->opcode
= IB_WC_REG_MR
;
2501 case HNS_ROCE_SQ_OPCODE_BIND_MW
:
2502 wc
->opcode
= IB_WC_REG_MR
;
2505 wc
->status
= IB_WC_GENERAL_ERR
;
2509 /* RQ correspond to CQE */
2510 wc
->byte_len
= le32_to_cpu(cqe
->byte_cnt
);
2512 opcode
= roce_get_field(cqe
->byte_4
, V2_CQE_BYTE_4_OPCODE_M
,
2513 V2_CQE_BYTE_4_OPCODE_S
);
2514 switch (opcode
& 0x1f) {
2515 case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM
:
2516 wc
->opcode
= IB_WC_RECV_RDMA_WITH_IMM
;
2517 wc
->wc_flags
= IB_WC_WITH_IMM
;
2519 cpu_to_be32(le32_to_cpu(cqe
->immtdata
));
2521 case HNS_ROCE_V2_OPCODE_SEND
:
2522 wc
->opcode
= IB_WC_RECV
;
2525 case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM
:
2526 wc
->opcode
= IB_WC_RECV
;
2527 wc
->wc_flags
= IB_WC_WITH_IMM
;
2529 cpu_to_be32(le32_to_cpu(cqe
->immtdata
));
2531 case HNS_ROCE_V2_OPCODE_SEND_WITH_INV
:
2532 wc
->opcode
= IB_WC_RECV
;
2533 wc
->wc_flags
= IB_WC_WITH_INVALIDATE
;
2534 wc
->ex
.invalidate_rkey
= le32_to_cpu(cqe
->rkey
);
2537 wc
->status
= IB_WC_GENERAL_ERR
;
2541 if ((wc
->qp
->qp_type
== IB_QPT_RC
||
2542 wc
->qp
->qp_type
== IB_QPT_UC
) &&
2543 (opcode
== HNS_ROCE_V2_OPCODE_SEND
||
2544 opcode
== HNS_ROCE_V2_OPCODE_SEND_WITH_IMM
||
2545 opcode
== HNS_ROCE_V2_OPCODE_SEND_WITH_INV
) &&
2546 (roce_get_bit(cqe
->byte_4
, V2_CQE_BYTE_4_RQ_INLINE_S
))) {
2547 ret
= hns_roce_handle_recv_inl_wqe(cqe
, cur_qp
, wc
);
2552 wc
->sl
= (u8
)roce_get_field(cqe
->byte_32
, V2_CQE_BYTE_32_SL_M
,
2553 V2_CQE_BYTE_32_SL_S
);
2554 wc
->src_qp
= (u8
)roce_get_field(cqe
->byte_32
,
2555 V2_CQE_BYTE_32_RMT_QPN_M
,
2556 V2_CQE_BYTE_32_RMT_QPN_S
);
2558 wc
->wc_flags
|= (roce_get_bit(cqe
->byte_32
,
2559 V2_CQE_BYTE_32_GRH_S
) ?
2561 wc
->port_num
= roce_get_field(cqe
->byte_32
,
2562 V2_CQE_BYTE_32_PORTN_M
, V2_CQE_BYTE_32_PORTN_S
);
2564 memcpy(wc
->smac
, cqe
->smac
, 4);
2565 wc
->smac
[4] = roce_get_field(cqe
->byte_28
,
2566 V2_CQE_BYTE_28_SMAC_4_M
,
2567 V2_CQE_BYTE_28_SMAC_4_S
);
2568 wc
->smac
[5] = roce_get_field(cqe
->byte_28
,
2569 V2_CQE_BYTE_28_SMAC_5_M
,
2570 V2_CQE_BYTE_28_SMAC_5_S
);
2571 if (roce_get_bit(cqe
->byte_28
, V2_CQE_BYTE_28_VID_VLD_S
)) {
2572 wc
->vlan_id
= (u16
)roce_get_field(cqe
->byte_28
,
2573 V2_CQE_BYTE_28_VID_M
,
2574 V2_CQE_BYTE_28_VID_S
);
2576 wc
->vlan_id
= 0xffff;
2579 wc
->wc_flags
|= (IB_WC_WITH_VLAN
| IB_WC_WITH_SMAC
);
2580 wc
->network_hdr_type
= roce_get_field(cqe
->byte_28
,
2581 V2_CQE_BYTE_28_PORT_TYPE_M
,
2582 V2_CQE_BYTE_28_PORT_TYPE_S
);
2588 static int hns_roce_v2_poll_cq(struct ib_cq
*ibcq
, int num_entries
,
2591 struct hns_roce_cq
*hr_cq
= to_hr_cq(ibcq
);
2592 struct hns_roce_qp
*cur_qp
= NULL
;
2593 unsigned long flags
;
2596 spin_lock_irqsave(&hr_cq
->lock
, flags
);
2598 for (npolled
= 0; npolled
< num_entries
; ++npolled
) {
2599 if (hns_roce_v2_poll_one(hr_cq
, &cur_qp
, wc
+ npolled
))
2604 /* Memory barrier */
2606 hns_roce_v2_cq_set_ci(hr_cq
, hr_cq
->cons_index
);
2609 spin_unlock_irqrestore(&hr_cq
->lock
, flags
);
2614 static int hns_roce_v2_set_hem(struct hns_roce_dev
*hr_dev
,
2615 struct hns_roce_hem_table
*table
, int obj
,
2618 struct device
*dev
= hr_dev
->dev
;
2619 struct hns_roce_cmd_mailbox
*mailbox
;
2620 struct hns_roce_hem_iter iter
;
2621 struct hns_roce_hem_mhop mhop
;
2622 struct hns_roce_hem
*hem
;
2623 unsigned long mhop_obj
= obj
;
2633 if (!hns_roce_check_whether_mhop(hr_dev
, table
->type
))
2636 hns_roce_calc_hem_mhop(hr_dev
, table
, &mhop_obj
, &mhop
);
2640 hop_num
= mhop
.hop_num
;
2641 chunk_ba_num
= mhop
.bt_chunk_size
/ 8;
2644 hem_idx
= i
* chunk_ba_num
* chunk_ba_num
+ j
* chunk_ba_num
+
2646 l1_idx
= i
* chunk_ba_num
+ j
;
2647 } else if (hop_num
== 1) {
2648 hem_idx
= i
* chunk_ba_num
+ j
;
2649 } else if (hop_num
== HNS_ROCE_HOP_NUM_0
) {
2653 switch (table
->type
) {
2655 op
= HNS_ROCE_CMD_WRITE_QPC_BT0
;
2658 op
= HNS_ROCE_CMD_WRITE_MPT_BT0
;
2661 op
= HNS_ROCE_CMD_WRITE_CQC_BT0
;
2664 op
= HNS_ROCE_CMD_WRITE_SRQC_BT0
;
2667 dev_warn(dev
, "Table %d not to be written by mailbox!\n",
2673 mailbox
= hns_roce_alloc_cmd_mailbox(hr_dev
);
2674 if (IS_ERR(mailbox
))
2675 return PTR_ERR(mailbox
);
2677 if (check_whether_last_step(hop_num
, step_idx
)) {
2678 hem
= table
->hem
[hem_idx
];
2679 for (hns_roce_hem_first(hem
, &iter
);
2680 !hns_roce_hem_last(&iter
); hns_roce_hem_next(&iter
)) {
2681 bt_ba
= hns_roce_hem_addr(&iter
);
2683 /* configure the ba, tag, and op */
2684 ret
= hns_roce_cmd_mbox(hr_dev
, bt_ba
, mailbox
->dma
,
2686 HNS_ROCE_CMD_TIMEOUT_MSECS
);
2690 bt_ba
= table
->bt_l0_dma_addr
[i
];
2691 else if (step_idx
== 1 && hop_num
== 2)
2692 bt_ba
= table
->bt_l1_dma_addr
[l1_idx
];
2694 /* configure the ba, tag, and op */
2695 ret
= hns_roce_cmd_mbox(hr_dev
, bt_ba
, mailbox
->dma
, obj
,
2696 0, op
, HNS_ROCE_CMD_TIMEOUT_MSECS
);
2699 hns_roce_free_cmd_mailbox(hr_dev
, mailbox
);
2703 static int hns_roce_v2_clear_hem(struct hns_roce_dev
*hr_dev
,
2704 struct hns_roce_hem_table
*table
, int obj
,
2707 struct device
*dev
= hr_dev
->dev
;
2708 struct hns_roce_cmd_mailbox
*mailbox
;
2712 if (!hns_roce_check_whether_mhop(hr_dev
, table
->type
))
2715 switch (table
->type
) {
2717 op
= HNS_ROCE_CMD_DESTROY_QPC_BT0
;
2720 op
= HNS_ROCE_CMD_DESTROY_MPT_BT0
;
2723 op
= HNS_ROCE_CMD_DESTROY_CQC_BT0
;
2726 op
= HNS_ROCE_CMD_DESTROY_SRQC_BT0
;
2729 dev_warn(dev
, "Table %d not to be destroyed by mailbox!\n",
2735 mailbox
= hns_roce_alloc_cmd_mailbox(hr_dev
);
2736 if (IS_ERR(mailbox
))
2737 return PTR_ERR(mailbox
);
2739 /* configure the tag and op */
2740 ret
= hns_roce_cmd_mbox(hr_dev
, 0, mailbox
->dma
, obj
, 0, op
,
2741 HNS_ROCE_CMD_TIMEOUT_MSECS
);
2743 hns_roce_free_cmd_mailbox(hr_dev
, mailbox
);
2747 static int hns_roce_v2_qp_modify(struct hns_roce_dev
*hr_dev
,
2748 struct hns_roce_mtt
*mtt
,
2749 enum ib_qp_state cur_state
,
2750 enum ib_qp_state new_state
,
2751 struct hns_roce_v2_qp_context
*context
,
2752 struct hns_roce_qp
*hr_qp
)
2754 struct hns_roce_cmd_mailbox
*mailbox
;
2757 mailbox
= hns_roce_alloc_cmd_mailbox(hr_dev
);
2758 if (IS_ERR(mailbox
))
2759 return PTR_ERR(mailbox
);
2761 memcpy(mailbox
->buf
, context
, sizeof(*context
) * 2);
2763 ret
= hns_roce_cmd_mbox(hr_dev
, mailbox
->dma
, 0, hr_qp
->qpn
, 0,
2764 HNS_ROCE_CMD_MODIFY_QPC
,
2765 HNS_ROCE_CMD_TIMEOUT_MSECS
);
2767 hns_roce_free_cmd_mailbox(hr_dev
, mailbox
);
2772 static void set_access_flags(struct hns_roce_qp
*hr_qp
,
2773 struct hns_roce_v2_qp_context
*context
,
2774 struct hns_roce_v2_qp_context
*qpc_mask
,
2775 const struct ib_qp_attr
*attr
, int attr_mask
)
2780 dest_rd_atomic
= (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
) ?
2781 attr
->max_dest_rd_atomic
: hr_qp
->resp_depth
;
2783 access_flags
= (attr_mask
& IB_QP_ACCESS_FLAGS
) ?
2784 attr
->qp_access_flags
: hr_qp
->atomic_rd_en
;
2786 if (!dest_rd_atomic
)
2787 access_flags
&= IB_ACCESS_REMOTE_WRITE
;
2789 roce_set_bit(context
->byte_76_srqn_op_en
, V2_QPC_BYTE_76_RRE_S
,
2790 !!(access_flags
& IB_ACCESS_REMOTE_READ
));
2791 roce_set_bit(qpc_mask
->byte_76_srqn_op_en
, V2_QPC_BYTE_76_RRE_S
, 0);
2793 roce_set_bit(context
->byte_76_srqn_op_en
, V2_QPC_BYTE_76_RWE_S
,
2794 !!(access_flags
& IB_ACCESS_REMOTE_WRITE
));
2795 roce_set_bit(qpc_mask
->byte_76_srqn_op_en
, V2_QPC_BYTE_76_RWE_S
, 0);
2797 roce_set_bit(context
->byte_76_srqn_op_en
, V2_QPC_BYTE_76_ATE_S
,
2798 !!(access_flags
& IB_ACCESS_REMOTE_ATOMIC
));
2799 roce_set_bit(qpc_mask
->byte_76_srqn_op_en
, V2_QPC_BYTE_76_ATE_S
, 0);
2802 static void modify_qp_reset_to_init(struct ib_qp
*ibqp
,
2803 const struct ib_qp_attr
*attr
,
2805 struct hns_roce_v2_qp_context
*context
,
2806 struct hns_roce_v2_qp_context
*qpc_mask
)
2808 struct hns_roce_dev
*hr_dev
= to_hr_dev(ibqp
->device
);
2809 struct hns_roce_qp
*hr_qp
= to_hr_qp(ibqp
);
2812 * In v2 engine, software pass context and context mask to hardware
2813 * when modifying qp. If software need modify some fields in context,
2814 * we should set all bits of the relevant fields in context mask to
2815 * 0 at the same time, else set them to 0x1.
2817 roce_set_field(context
->byte_4_sqpn_tst
, V2_QPC_BYTE_4_TST_M
,
2818 V2_QPC_BYTE_4_TST_S
, to_hr_qp_type(hr_qp
->ibqp
.qp_type
));
2819 roce_set_field(qpc_mask
->byte_4_sqpn_tst
, V2_QPC_BYTE_4_TST_M
,
2820 V2_QPC_BYTE_4_TST_S
, 0);
2822 if (ibqp
->qp_type
== IB_QPT_GSI
)
2823 roce_set_field(context
->byte_4_sqpn_tst
,
2824 V2_QPC_BYTE_4_SGE_SHIFT_M
,
2825 V2_QPC_BYTE_4_SGE_SHIFT_S
,
2826 ilog2((unsigned int)hr_qp
->sge
.sge_cnt
));
2828 roce_set_field(context
->byte_4_sqpn_tst
,
2829 V2_QPC_BYTE_4_SGE_SHIFT_M
,
2830 V2_QPC_BYTE_4_SGE_SHIFT_S
,
2831 hr_qp
->sq
.max_gs
> 2 ?
2832 ilog2((unsigned int)hr_qp
->sge
.sge_cnt
) : 0);
2834 roce_set_field(qpc_mask
->byte_4_sqpn_tst
, V2_QPC_BYTE_4_SGE_SHIFT_M
,
2835 V2_QPC_BYTE_4_SGE_SHIFT_S
, 0);
2837 roce_set_field(context
->byte_4_sqpn_tst
, V2_QPC_BYTE_4_SQPN_M
,
2838 V2_QPC_BYTE_4_SQPN_S
, hr_qp
->qpn
);
2839 roce_set_field(qpc_mask
->byte_4_sqpn_tst
, V2_QPC_BYTE_4_SQPN_M
,
2840 V2_QPC_BYTE_4_SQPN_S
, 0);
2842 roce_set_field(context
->byte_16_buf_ba_pg_sz
, V2_QPC_BYTE_16_PD_M
,
2843 V2_QPC_BYTE_16_PD_S
, to_hr_pd(ibqp
->pd
)->pdn
);
2844 roce_set_field(qpc_mask
->byte_16_buf_ba_pg_sz
, V2_QPC_BYTE_16_PD_M
,
2845 V2_QPC_BYTE_16_PD_S
, 0);
2847 roce_set_field(context
->byte_20_smac_sgid_idx
, V2_QPC_BYTE_20_RQWS_M
,
2848 V2_QPC_BYTE_20_RQWS_S
, ilog2(hr_qp
->rq
.max_gs
));
2849 roce_set_field(qpc_mask
->byte_20_smac_sgid_idx
, V2_QPC_BYTE_20_RQWS_M
,
2850 V2_QPC_BYTE_20_RQWS_S
, 0);
2852 roce_set_field(context
->byte_20_smac_sgid_idx
,
2853 V2_QPC_BYTE_20_SQ_SHIFT_M
, V2_QPC_BYTE_20_SQ_SHIFT_S
,
2854 ilog2((unsigned int)hr_qp
->sq
.wqe_cnt
));
2855 roce_set_field(qpc_mask
->byte_20_smac_sgid_idx
,
2856 V2_QPC_BYTE_20_SQ_SHIFT_M
, V2_QPC_BYTE_20_SQ_SHIFT_S
, 0);
2858 roce_set_field(context
->byte_20_smac_sgid_idx
,
2859 V2_QPC_BYTE_20_RQ_SHIFT_M
, V2_QPC_BYTE_20_RQ_SHIFT_S
,
2860 (hr_qp
->ibqp
.qp_type
== IB_QPT_XRC_INI
||
2861 hr_qp
->ibqp
.qp_type
== IB_QPT_XRC_TGT
|| ibqp
->srq
) ? 0 :
2862 ilog2((unsigned int)hr_qp
->rq
.wqe_cnt
));
2863 roce_set_field(qpc_mask
->byte_20_smac_sgid_idx
,
2864 V2_QPC_BYTE_20_RQ_SHIFT_M
, V2_QPC_BYTE_20_RQ_SHIFT_S
, 0);
2866 /* No VLAN need to set 0xFFF */
2867 roce_set_field(context
->byte_24_mtu_tc
, V2_QPC_BYTE_24_VLAN_ID_M
,
2868 V2_QPC_BYTE_24_VLAN_ID_S
, 0xfff);
2869 roce_set_field(qpc_mask
->byte_24_mtu_tc
, V2_QPC_BYTE_24_VLAN_ID_M
,
2870 V2_QPC_BYTE_24_VLAN_ID_S
, 0);
2873 * Set some fields in context to zero, Because the default values
2874 * of all fields in context are zero, we need not set them to 0 again.
2875 * but we should set the relevant fields of context mask to 0.
2877 roce_set_bit(qpc_mask
->byte_56_dqpn_err
, V2_QPC_BYTE_56_SQ_TX_ERR_S
, 0);
2878 roce_set_bit(qpc_mask
->byte_56_dqpn_err
, V2_QPC_BYTE_56_SQ_RX_ERR_S
, 0);
2879 roce_set_bit(qpc_mask
->byte_56_dqpn_err
, V2_QPC_BYTE_56_RQ_TX_ERR_S
, 0);
2880 roce_set_bit(qpc_mask
->byte_56_dqpn_err
, V2_QPC_BYTE_56_RQ_RX_ERR_S
, 0);
2882 roce_set_field(qpc_mask
->byte_60_qpst_tempid
, V2_QPC_BYTE_60_TEMPID_M
,
2883 V2_QPC_BYTE_60_TEMPID_S
, 0);
2885 roce_set_field(qpc_mask
->byte_60_qpst_tempid
,
2886 V2_QPC_BYTE_60_SCC_TOKEN_M
, V2_QPC_BYTE_60_SCC_TOKEN_S
,
2888 roce_set_bit(qpc_mask
->byte_60_qpst_tempid
,
2889 V2_QPC_BYTE_60_SQ_DB_DOING_S
, 0);
2890 roce_set_bit(qpc_mask
->byte_60_qpst_tempid
,
2891 V2_QPC_BYTE_60_RQ_DB_DOING_S
, 0);
2892 roce_set_bit(qpc_mask
->byte_28_at_fl
, V2_QPC_BYTE_28_CNP_TX_FLAG_S
, 0);
2893 roce_set_bit(qpc_mask
->byte_28_at_fl
, V2_QPC_BYTE_28_CE_FLAG_S
, 0);
2895 if (attr_mask
& IB_QP_QKEY
) {
2896 context
->qkey_xrcd
= attr
->qkey
;
2897 qpc_mask
->qkey_xrcd
= 0;
2898 hr_qp
->qkey
= attr
->qkey
;
2901 if (hr_qp
->rdb_en
) {
2902 roce_set_bit(context
->byte_68_rq_db
,
2903 V2_QPC_BYTE_68_RQ_RECORD_EN_S
, 1);
2904 roce_set_bit(qpc_mask
->byte_68_rq_db
,
2905 V2_QPC_BYTE_68_RQ_RECORD_EN_S
, 0);
2908 roce_set_field(context
->byte_68_rq_db
,
2909 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M
,
2910 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S
,
2911 ((u32
)hr_qp
->rdb
.dma
) >> 1);
2912 roce_set_field(qpc_mask
->byte_68_rq_db
,
2913 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M
,
2914 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S
, 0);
2915 context
->rq_db_record_addr
= hr_qp
->rdb
.dma
>> 32;
2916 qpc_mask
->rq_db_record_addr
= 0;
2918 roce_set_bit(context
->byte_76_srqn_op_en
, V2_QPC_BYTE_76_RQIE_S
,
2919 (hr_dev
->caps
.flags
& HNS_ROCE_CAP_FLAG_RQ_INLINE
) ? 1 : 0);
2920 roce_set_bit(qpc_mask
->byte_76_srqn_op_en
, V2_QPC_BYTE_76_RQIE_S
, 0);
2922 roce_set_field(context
->byte_80_rnr_rx_cqn
, V2_QPC_BYTE_80_RX_CQN_M
,
2923 V2_QPC_BYTE_80_RX_CQN_S
, to_hr_cq(ibqp
->recv_cq
)->cqn
);
2924 roce_set_field(qpc_mask
->byte_80_rnr_rx_cqn
, V2_QPC_BYTE_80_RX_CQN_M
,
2925 V2_QPC_BYTE_80_RX_CQN_S
, 0);
2927 roce_set_field(context
->byte_76_srqn_op_en
,
2928 V2_QPC_BYTE_76_SRQN_M
, V2_QPC_BYTE_76_SRQN_S
,
2929 to_hr_srq(ibqp
->srq
)->srqn
);
2930 roce_set_field(qpc_mask
->byte_76_srqn_op_en
,
2931 V2_QPC_BYTE_76_SRQN_M
, V2_QPC_BYTE_76_SRQN_S
, 0);
2932 roce_set_bit(context
->byte_76_srqn_op_en
,
2933 V2_QPC_BYTE_76_SRQ_EN_S
, 1);
2934 roce_set_bit(qpc_mask
->byte_76_srqn_op_en
,
2935 V2_QPC_BYTE_76_SRQ_EN_S
, 0);
2938 roce_set_field(qpc_mask
->byte_84_rq_ci_pi
,
2939 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M
,
2940 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S
, 0);
2941 roce_set_field(qpc_mask
->byte_84_rq_ci_pi
,
2942 V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M
,
2943 V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S
, 0);
2945 roce_set_field(qpc_mask
->byte_92_srq_info
, V2_QPC_BYTE_92_SRQ_INFO_M
,
2946 V2_QPC_BYTE_92_SRQ_INFO_S
, 0);
2948 roce_set_field(qpc_mask
->byte_96_rx_reqmsn
, V2_QPC_BYTE_96_RX_REQ_MSN_M
,
2949 V2_QPC_BYTE_96_RX_REQ_MSN_S
, 0);
2951 roce_set_field(qpc_mask
->byte_104_rq_sge
,
2952 V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_M
,
2953 V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_S
, 0);
2955 roce_set_bit(qpc_mask
->byte_108_rx_reqepsn
,
2956 V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S
, 0);
2957 roce_set_field(qpc_mask
->byte_108_rx_reqepsn
,
2958 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M
,
2959 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S
, 0);
2960 roce_set_bit(qpc_mask
->byte_108_rx_reqepsn
,
2961 V2_QPC_BYTE_108_RX_REQ_RNR_S
, 0);
2963 qpc_mask
->rq_rnr_timer
= 0;
2964 qpc_mask
->rx_msg_len
= 0;
2965 qpc_mask
->rx_rkey_pkt_info
= 0;
2966 qpc_mask
->rx_va
= 0;
2968 roce_set_field(qpc_mask
->byte_132_trrl
, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M
,
2969 V2_QPC_BYTE_132_TRRL_HEAD_MAX_S
, 0);
2970 roce_set_field(qpc_mask
->byte_132_trrl
, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M
,
2971 V2_QPC_BYTE_132_TRRL_TAIL_MAX_S
, 0);
2973 roce_set_bit(qpc_mask
->byte_140_raq
, V2_QPC_BYTE_140_RQ_RTY_WAIT_DO_S
,
2975 roce_set_field(qpc_mask
->byte_140_raq
, V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M
,
2976 V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S
, 0);
2977 roce_set_field(qpc_mask
->byte_140_raq
, V2_QPC_BYTE_140_RAQ_TRRL_TAIL_M
,
2978 V2_QPC_BYTE_140_RAQ_TRRL_TAIL_S
, 0);
2980 roce_set_field(qpc_mask
->byte_144_raq
,
2981 V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M
,
2982 V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S
, 0);
2983 roce_set_field(qpc_mask
->byte_144_raq
, V2_QPC_BYTE_144_RAQ_CREDIT_M
,
2984 V2_QPC_BYTE_144_RAQ_CREDIT_S
, 0);
2985 roce_set_bit(qpc_mask
->byte_144_raq
, V2_QPC_BYTE_144_RESP_RTY_FLG_S
, 0);
2987 roce_set_field(qpc_mask
->byte_148_raq
, V2_QPC_BYTE_148_RQ_MSN_M
,
2988 V2_QPC_BYTE_148_RQ_MSN_S
, 0);
2989 roce_set_field(qpc_mask
->byte_148_raq
, V2_QPC_BYTE_148_RAQ_SYNDROME_M
,
2990 V2_QPC_BYTE_148_RAQ_SYNDROME_S
, 0);
2992 roce_set_field(qpc_mask
->byte_152_raq
, V2_QPC_BYTE_152_RAQ_PSN_M
,
2993 V2_QPC_BYTE_152_RAQ_PSN_S
, 0);
2994 roce_set_field(qpc_mask
->byte_152_raq
,
2995 V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_M
,
2996 V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_S
, 0);
2998 roce_set_field(qpc_mask
->byte_156_raq
, V2_QPC_BYTE_156_RAQ_USE_PKTN_M
,
2999 V2_QPC_BYTE_156_RAQ_USE_PKTN_S
, 0);
3001 roce_set_field(qpc_mask
->byte_160_sq_ci_pi
,
3002 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M
,
3003 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S
, 0);
3004 roce_set_field(qpc_mask
->byte_160_sq_ci_pi
,
3005 V2_QPC_BYTE_160_SQ_CONSUMER_IDX_M
,
3006 V2_QPC_BYTE_160_SQ_CONSUMER_IDX_S
, 0);
3008 roce_set_bit(qpc_mask
->byte_168_irrl_idx
,
3009 V2_QPC_BYTE_168_POLL_DB_WAIT_DO_S
, 0);
3010 roce_set_bit(qpc_mask
->byte_168_irrl_idx
,
3011 V2_QPC_BYTE_168_SCC_TOKEN_FORBID_SQ_DEQ_S
, 0);
3012 roce_set_bit(qpc_mask
->byte_168_irrl_idx
,
3013 V2_QPC_BYTE_168_WAIT_ACK_TIMEOUT_S
, 0);
3014 roce_set_bit(qpc_mask
->byte_168_irrl_idx
,
3015 V2_QPC_BYTE_168_MSG_RTY_LP_FLG_S
, 0);
3016 roce_set_bit(qpc_mask
->byte_168_irrl_idx
,
3017 V2_QPC_BYTE_168_SQ_INVLD_FLG_S
, 0);
3018 roce_set_field(qpc_mask
->byte_168_irrl_idx
,
3019 V2_QPC_BYTE_168_IRRL_IDX_LSB_M
,
3020 V2_QPC_BYTE_168_IRRL_IDX_LSB_S
, 0);
3022 roce_set_field(context
->byte_172_sq_psn
, V2_QPC_BYTE_172_ACK_REQ_FREQ_M
,
3023 V2_QPC_BYTE_172_ACK_REQ_FREQ_S
, 4);
3024 roce_set_field(qpc_mask
->byte_172_sq_psn
,
3025 V2_QPC_BYTE_172_ACK_REQ_FREQ_M
,
3026 V2_QPC_BYTE_172_ACK_REQ_FREQ_S
, 0);
3028 roce_set_bit(qpc_mask
->byte_172_sq_psn
, V2_QPC_BYTE_172_MSG_RNR_FLG_S
,
3031 roce_set_bit(context
->byte_172_sq_psn
, V2_QPC_BYTE_172_FRE_S
, 1);
3032 roce_set_bit(qpc_mask
->byte_172_sq_psn
, V2_QPC_BYTE_172_FRE_S
, 0);
3034 roce_set_field(qpc_mask
->byte_176_msg_pktn
,
3035 V2_QPC_BYTE_176_MSG_USE_PKTN_M
,
3036 V2_QPC_BYTE_176_MSG_USE_PKTN_S
, 0);
3037 roce_set_field(qpc_mask
->byte_176_msg_pktn
,
3038 V2_QPC_BYTE_176_IRRL_HEAD_PRE_M
,
3039 V2_QPC_BYTE_176_IRRL_HEAD_PRE_S
, 0);
3041 roce_set_field(qpc_mask
->byte_184_irrl_idx
,
3042 V2_QPC_BYTE_184_IRRL_IDX_MSB_M
,
3043 V2_QPC_BYTE_184_IRRL_IDX_MSB_S
, 0);
3045 qpc_mask
->cur_sge_offset
= 0;
3047 roce_set_field(qpc_mask
->byte_192_ext_sge
,
3048 V2_QPC_BYTE_192_CUR_SGE_IDX_M
,
3049 V2_QPC_BYTE_192_CUR_SGE_IDX_S
, 0);
3050 roce_set_field(qpc_mask
->byte_192_ext_sge
,
3051 V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_M
,
3052 V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_S
, 0);
3054 roce_set_field(qpc_mask
->byte_196_sq_psn
, V2_QPC_BYTE_196_IRRL_HEAD_M
,
3055 V2_QPC_BYTE_196_IRRL_HEAD_S
, 0);
3057 roce_set_field(qpc_mask
->byte_200_sq_max
, V2_QPC_BYTE_200_SQ_MAX_IDX_M
,
3058 V2_QPC_BYTE_200_SQ_MAX_IDX_S
, 0);
3059 roce_set_field(qpc_mask
->byte_200_sq_max
,
3060 V2_QPC_BYTE_200_LCL_OPERATED_CNT_M
,
3061 V2_QPC_BYTE_200_LCL_OPERATED_CNT_S
, 0);
3063 roce_set_bit(qpc_mask
->byte_208_irrl
, V2_QPC_BYTE_208_PKT_RNR_FLG_S
, 0);
3064 roce_set_bit(qpc_mask
->byte_208_irrl
, V2_QPC_BYTE_208_PKT_RTY_FLG_S
, 0);
3066 roce_set_field(qpc_mask
->byte_212_lsn
, V2_QPC_BYTE_212_CHECK_FLG_M
,
3067 V2_QPC_BYTE_212_CHECK_FLG_S
, 0);
3069 qpc_mask
->sq_timer
= 0;
3071 roce_set_field(qpc_mask
->byte_220_retry_psn_msn
,
3072 V2_QPC_BYTE_220_RETRY_MSG_MSN_M
,
3073 V2_QPC_BYTE_220_RETRY_MSG_MSN_S
, 0);
3074 roce_set_field(qpc_mask
->byte_232_irrl_sge
,
3075 V2_QPC_BYTE_232_IRRL_SGE_IDX_M
,
3076 V2_QPC_BYTE_232_IRRL_SGE_IDX_S
, 0);
3078 roce_set_bit(qpc_mask
->byte_232_irrl_sge
, V2_QPC_BYTE_232_SO_LP_VLD_S
,
3080 roce_set_bit(qpc_mask
->byte_232_irrl_sge
,
3081 V2_QPC_BYTE_232_FENCE_LP_VLD_S
, 0);
3082 roce_set_bit(qpc_mask
->byte_232_irrl_sge
, V2_QPC_BYTE_232_IRRL_LP_VLD_S
,
3085 qpc_mask
->irrl_cur_sge_offset
= 0;
3087 roce_set_field(qpc_mask
->byte_240_irrl_tail
,
3088 V2_QPC_BYTE_240_IRRL_TAIL_REAL_M
,
3089 V2_QPC_BYTE_240_IRRL_TAIL_REAL_S
, 0);
3090 roce_set_field(qpc_mask
->byte_240_irrl_tail
,
3091 V2_QPC_BYTE_240_IRRL_TAIL_RD_M
,
3092 V2_QPC_BYTE_240_IRRL_TAIL_RD_S
, 0);
3093 roce_set_field(qpc_mask
->byte_240_irrl_tail
,
3094 V2_QPC_BYTE_240_RX_ACK_MSN_M
,
3095 V2_QPC_BYTE_240_RX_ACK_MSN_S
, 0);
3097 roce_set_field(qpc_mask
->byte_248_ack_psn
, V2_QPC_BYTE_248_IRRL_PSN_M
,
3098 V2_QPC_BYTE_248_IRRL_PSN_S
, 0);
3099 roce_set_bit(qpc_mask
->byte_248_ack_psn
, V2_QPC_BYTE_248_ACK_PSN_ERR_S
,
3101 roce_set_field(qpc_mask
->byte_248_ack_psn
,
3102 V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M
,
3103 V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S
, 0);
3104 roce_set_bit(qpc_mask
->byte_248_ack_psn
, V2_QPC_BYTE_248_IRRL_PSN_VLD_S
,
3106 roce_set_bit(qpc_mask
->byte_248_ack_psn
,
3107 V2_QPC_BYTE_248_RNR_RETRY_FLAG_S
, 0);
3108 roce_set_bit(qpc_mask
->byte_248_ack_psn
, V2_QPC_BYTE_248_CQ_ERR_IND_S
,
3111 hr_qp
->access_flags
= attr
->qp_access_flags
;
3112 hr_qp
->pkey_index
= attr
->pkey_index
;
3113 roce_set_field(context
->byte_252_err_txcqn
, V2_QPC_BYTE_252_TX_CQN_M
,
3114 V2_QPC_BYTE_252_TX_CQN_S
, to_hr_cq(ibqp
->send_cq
)->cqn
);
3115 roce_set_field(qpc_mask
->byte_252_err_txcqn
, V2_QPC_BYTE_252_TX_CQN_M
,
3116 V2_QPC_BYTE_252_TX_CQN_S
, 0);
3118 roce_set_field(qpc_mask
->byte_252_err_txcqn
, V2_QPC_BYTE_252_ERR_TYPE_M
,
3119 V2_QPC_BYTE_252_ERR_TYPE_S
, 0);
3121 roce_set_field(qpc_mask
->byte_256_sqflush_rqcqe
,
3122 V2_QPC_BYTE_256_RQ_CQE_IDX_M
,
3123 V2_QPC_BYTE_256_RQ_CQE_IDX_S
, 0);
3124 roce_set_field(qpc_mask
->byte_256_sqflush_rqcqe
,
3125 V2_QPC_BYTE_256_SQ_FLUSH_IDX_M
,
3126 V2_QPC_BYTE_256_SQ_FLUSH_IDX_S
, 0);
3129 static void modify_qp_init_to_init(struct ib_qp
*ibqp
,
3130 const struct ib_qp_attr
*attr
, int attr_mask
,
3131 struct hns_roce_v2_qp_context
*context
,
3132 struct hns_roce_v2_qp_context
*qpc_mask
)
3134 struct hns_roce_qp
*hr_qp
= to_hr_qp(ibqp
);
3137 * In v2 engine, software pass context and context mask to hardware
3138 * when modifying qp. If software need modify some fields in context,
3139 * we should set all bits of the relevant fields in context mask to
3140 * 0 at the same time, else set them to 0x1.
3142 roce_set_field(context
->byte_4_sqpn_tst
, V2_QPC_BYTE_4_TST_M
,
3143 V2_QPC_BYTE_4_TST_S
, to_hr_qp_type(hr_qp
->ibqp
.qp_type
));
3144 roce_set_field(qpc_mask
->byte_4_sqpn_tst
, V2_QPC_BYTE_4_TST_M
,
3145 V2_QPC_BYTE_4_TST_S
, 0);
3147 if (ibqp
->qp_type
== IB_QPT_GSI
)
3148 roce_set_field(context
->byte_4_sqpn_tst
,
3149 V2_QPC_BYTE_4_SGE_SHIFT_M
,
3150 V2_QPC_BYTE_4_SGE_SHIFT_S
,
3151 ilog2((unsigned int)hr_qp
->sge
.sge_cnt
));
3153 roce_set_field(context
->byte_4_sqpn_tst
,
3154 V2_QPC_BYTE_4_SGE_SHIFT_M
,
3155 V2_QPC_BYTE_4_SGE_SHIFT_S
, hr_qp
->sq
.max_gs
> 2 ?
3156 ilog2((unsigned int)hr_qp
->sge
.sge_cnt
) : 0);
3158 roce_set_field(qpc_mask
->byte_4_sqpn_tst
, V2_QPC_BYTE_4_SGE_SHIFT_M
,
3159 V2_QPC_BYTE_4_SGE_SHIFT_S
, 0);
3161 if (attr_mask
& IB_QP_ACCESS_FLAGS
) {
3162 roce_set_bit(context
->byte_76_srqn_op_en
, V2_QPC_BYTE_76_RRE_S
,
3163 !!(attr
->qp_access_flags
& IB_ACCESS_REMOTE_READ
));
3164 roce_set_bit(qpc_mask
->byte_76_srqn_op_en
, V2_QPC_BYTE_76_RRE_S
,
3167 roce_set_bit(context
->byte_76_srqn_op_en
, V2_QPC_BYTE_76_RWE_S
,
3168 !!(attr
->qp_access_flags
&
3169 IB_ACCESS_REMOTE_WRITE
));
3170 roce_set_bit(qpc_mask
->byte_76_srqn_op_en
, V2_QPC_BYTE_76_RWE_S
,
3173 roce_set_bit(context
->byte_76_srqn_op_en
, V2_QPC_BYTE_76_ATE_S
,
3174 !!(attr
->qp_access_flags
&
3175 IB_ACCESS_REMOTE_ATOMIC
));
3176 roce_set_bit(qpc_mask
->byte_76_srqn_op_en
, V2_QPC_BYTE_76_ATE_S
,
3179 roce_set_bit(context
->byte_76_srqn_op_en
, V2_QPC_BYTE_76_RRE_S
,
3180 !!(hr_qp
->access_flags
& IB_ACCESS_REMOTE_READ
));
3181 roce_set_bit(qpc_mask
->byte_76_srqn_op_en
, V2_QPC_BYTE_76_RRE_S
,
3184 roce_set_bit(context
->byte_76_srqn_op_en
, V2_QPC_BYTE_76_RWE_S
,
3185 !!(hr_qp
->access_flags
& IB_ACCESS_REMOTE_WRITE
));
3186 roce_set_bit(qpc_mask
->byte_76_srqn_op_en
, V2_QPC_BYTE_76_RWE_S
,
3189 roce_set_bit(context
->byte_76_srqn_op_en
, V2_QPC_BYTE_76_ATE_S
,
3190 !!(hr_qp
->access_flags
& IB_ACCESS_REMOTE_ATOMIC
));
3191 roce_set_bit(qpc_mask
->byte_76_srqn_op_en
, V2_QPC_BYTE_76_ATE_S
,
3195 roce_set_field(context
->byte_20_smac_sgid_idx
,
3196 V2_QPC_BYTE_20_SQ_SHIFT_M
, V2_QPC_BYTE_20_SQ_SHIFT_S
,
3197 ilog2((unsigned int)hr_qp
->sq
.wqe_cnt
));
3198 roce_set_field(qpc_mask
->byte_20_smac_sgid_idx
,
3199 V2_QPC_BYTE_20_SQ_SHIFT_M
, V2_QPC_BYTE_20_SQ_SHIFT_S
, 0);
3201 roce_set_field(context
->byte_20_smac_sgid_idx
,
3202 V2_QPC_BYTE_20_RQ_SHIFT_M
, V2_QPC_BYTE_20_RQ_SHIFT_S
,
3203 (hr_qp
->ibqp
.qp_type
== IB_QPT_XRC_INI
||
3204 hr_qp
->ibqp
.qp_type
== IB_QPT_XRC_TGT
|| ibqp
->srq
) ? 0 :
3205 ilog2((unsigned int)hr_qp
->rq
.wqe_cnt
));
3206 roce_set_field(qpc_mask
->byte_20_smac_sgid_idx
,
3207 V2_QPC_BYTE_20_RQ_SHIFT_M
, V2_QPC_BYTE_20_RQ_SHIFT_S
, 0);
3209 roce_set_field(context
->byte_16_buf_ba_pg_sz
, V2_QPC_BYTE_16_PD_M
,
3210 V2_QPC_BYTE_16_PD_S
, to_hr_pd(ibqp
->pd
)->pdn
);
3211 roce_set_field(qpc_mask
->byte_16_buf_ba_pg_sz
, V2_QPC_BYTE_16_PD_M
,
3212 V2_QPC_BYTE_16_PD_S
, 0);
3214 roce_set_field(context
->byte_80_rnr_rx_cqn
, V2_QPC_BYTE_80_RX_CQN_M
,
3215 V2_QPC_BYTE_80_RX_CQN_S
, to_hr_cq(ibqp
->recv_cq
)->cqn
);
3216 roce_set_field(qpc_mask
->byte_80_rnr_rx_cqn
, V2_QPC_BYTE_80_RX_CQN_M
,
3217 V2_QPC_BYTE_80_RX_CQN_S
, 0);
3219 roce_set_field(context
->byte_252_err_txcqn
, V2_QPC_BYTE_252_TX_CQN_M
,
3220 V2_QPC_BYTE_252_TX_CQN_S
, to_hr_cq(ibqp
->send_cq
)->cqn
);
3221 roce_set_field(qpc_mask
->byte_252_err_txcqn
, V2_QPC_BYTE_252_TX_CQN_M
,
3222 V2_QPC_BYTE_252_TX_CQN_S
, 0);
3225 roce_set_bit(context
->byte_76_srqn_op_en
,
3226 V2_QPC_BYTE_76_SRQ_EN_S
, 1);
3227 roce_set_bit(qpc_mask
->byte_76_srqn_op_en
,
3228 V2_QPC_BYTE_76_SRQ_EN_S
, 0);
3229 roce_set_field(context
->byte_76_srqn_op_en
,
3230 V2_QPC_BYTE_76_SRQN_M
, V2_QPC_BYTE_76_SRQN_S
,
3231 to_hr_srq(ibqp
->srq
)->srqn
);
3232 roce_set_field(qpc_mask
->byte_76_srqn_op_en
,
3233 V2_QPC_BYTE_76_SRQN_M
, V2_QPC_BYTE_76_SRQN_S
, 0);
3236 if (attr_mask
& IB_QP_QKEY
) {
3237 context
->qkey_xrcd
= attr
->qkey
;
3238 qpc_mask
->qkey_xrcd
= 0;
3241 roce_set_field(context
->byte_4_sqpn_tst
, V2_QPC_BYTE_4_SQPN_M
,
3242 V2_QPC_BYTE_4_SQPN_S
, hr_qp
->qpn
);
3243 roce_set_field(qpc_mask
->byte_4_sqpn_tst
, V2_QPC_BYTE_4_SQPN_M
,
3244 V2_QPC_BYTE_4_SQPN_S
, 0);
3246 if (attr_mask
& IB_QP_DEST_QPN
) {
3247 roce_set_field(context
->byte_56_dqpn_err
, V2_QPC_BYTE_56_DQPN_M
,
3248 V2_QPC_BYTE_56_DQPN_S
, hr_qp
->qpn
);
3249 roce_set_field(qpc_mask
->byte_56_dqpn_err
,
3250 V2_QPC_BYTE_56_DQPN_M
, V2_QPC_BYTE_56_DQPN_S
, 0);
3254 static int modify_qp_init_to_rtr(struct ib_qp
*ibqp
,
3255 const struct ib_qp_attr
*attr
, int attr_mask
,
3256 struct hns_roce_v2_qp_context
*context
,
3257 struct hns_roce_v2_qp_context
*qpc_mask
)
3259 const struct ib_global_route
*grh
= rdma_ah_read_grh(&attr
->ah_attr
);
3260 struct hns_roce_dev
*hr_dev
= to_hr_dev(ibqp
->device
);
3261 struct hns_roce_qp
*hr_qp
= to_hr_qp(ibqp
);
3262 struct device
*dev
= hr_dev
->dev
;
3263 dma_addr_t dma_handle_3
;
3264 dma_addr_t dma_handle_2
;
3265 dma_addr_t dma_handle
;
3275 /* Search qp buf's mtts */
3276 mtts
= hns_roce_table_find(hr_dev
, &hr_dev
->mr_table
.mtt_table
,
3277 hr_qp
->mtt
.first_seg
, &dma_handle
);
3279 dev_err(dev
, "qp buf pa find failed\n");
3283 /* Search IRRL's mtts */
3284 mtts_2
= hns_roce_table_find(hr_dev
, &hr_dev
->qp_table
.irrl_table
,
3285 hr_qp
->qpn
, &dma_handle_2
);
3287 dev_err(dev
, "qp irrl_table find failed\n");
3291 /* Search TRRL's mtts */
3292 mtts_3
= hns_roce_table_find(hr_dev
, &hr_dev
->qp_table
.trrl_table
,
3293 hr_qp
->qpn
, &dma_handle_3
);
3295 dev_err(dev
, "qp trrl_table find failed\n");
3299 if (attr_mask
& IB_QP_ALT_PATH
) {
3300 dev_err(dev
, "INIT2RTR attr_mask (0x%x) error\n", attr_mask
);
3304 dmac
= (u8
*)attr
->ah_attr
.roce
.dmac
;
3305 context
->wqe_sge_ba
= (u32
)(dma_handle
>> 3);
3306 qpc_mask
->wqe_sge_ba
= 0;
3309 * In v2 engine, software pass context and context mask to hardware
3310 * when modifying qp. If software need modify some fields in context,
3311 * we should set all bits of the relevant fields in context mask to
3312 * 0 at the same time, else set them to 0x1.
3314 roce_set_field(context
->byte_12_sq_hop
, V2_QPC_BYTE_12_WQE_SGE_BA_M
,
3315 V2_QPC_BYTE_12_WQE_SGE_BA_S
, dma_handle
>> (32 + 3));
3316 roce_set_field(qpc_mask
->byte_12_sq_hop
, V2_QPC_BYTE_12_WQE_SGE_BA_M
,
3317 V2_QPC_BYTE_12_WQE_SGE_BA_S
, 0);
3319 roce_set_field(context
->byte_12_sq_hop
, V2_QPC_BYTE_12_SQ_HOP_NUM_M
,
3320 V2_QPC_BYTE_12_SQ_HOP_NUM_S
,
3321 hr_dev
->caps
.mtt_hop_num
== HNS_ROCE_HOP_NUM_0
?
3322 0 : hr_dev
->caps
.mtt_hop_num
);
3323 roce_set_field(qpc_mask
->byte_12_sq_hop
, V2_QPC_BYTE_12_SQ_HOP_NUM_M
,
3324 V2_QPC_BYTE_12_SQ_HOP_NUM_S
, 0);
3326 roce_set_field(context
->byte_20_smac_sgid_idx
,
3327 V2_QPC_BYTE_20_SGE_HOP_NUM_M
,
3328 V2_QPC_BYTE_20_SGE_HOP_NUM_S
,
3329 ((ibqp
->qp_type
== IB_QPT_GSI
) || hr_qp
->sq
.max_gs
> 2) ?
3330 hr_dev
->caps
.mtt_hop_num
: 0);
3331 roce_set_field(qpc_mask
->byte_20_smac_sgid_idx
,
3332 V2_QPC_BYTE_20_SGE_HOP_NUM_M
,
3333 V2_QPC_BYTE_20_SGE_HOP_NUM_S
, 0);
3335 roce_set_field(context
->byte_20_smac_sgid_idx
,
3336 V2_QPC_BYTE_20_RQ_HOP_NUM_M
,
3337 V2_QPC_BYTE_20_RQ_HOP_NUM_S
,
3338 hr_dev
->caps
.mtt_hop_num
== HNS_ROCE_HOP_NUM_0
?
3339 0 : hr_dev
->caps
.mtt_hop_num
);
3340 roce_set_field(qpc_mask
->byte_20_smac_sgid_idx
,
3341 V2_QPC_BYTE_20_RQ_HOP_NUM_M
,
3342 V2_QPC_BYTE_20_RQ_HOP_NUM_S
, 0);
3344 roce_set_field(context
->byte_16_buf_ba_pg_sz
,
3345 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M
,
3346 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S
,
3347 hr_dev
->caps
.mtt_ba_pg_sz
+ PG_SHIFT_OFFSET
);
3348 roce_set_field(qpc_mask
->byte_16_buf_ba_pg_sz
,
3349 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M
,
3350 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S
, 0);
3352 roce_set_field(context
->byte_16_buf_ba_pg_sz
,
3353 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M
,
3354 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S
,
3355 hr_dev
->caps
.mtt_buf_pg_sz
+ PG_SHIFT_OFFSET
);
3356 roce_set_field(qpc_mask
->byte_16_buf_ba_pg_sz
,
3357 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M
,
3358 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S
, 0);
3360 roce_set_field(context
->byte_80_rnr_rx_cqn
,
3361 V2_QPC_BYTE_80_MIN_RNR_TIME_M
,
3362 V2_QPC_BYTE_80_MIN_RNR_TIME_S
, attr
->min_rnr_timer
);
3363 roce_set_field(qpc_mask
->byte_80_rnr_rx_cqn
,
3364 V2_QPC_BYTE_80_MIN_RNR_TIME_M
,
3365 V2_QPC_BYTE_80_MIN_RNR_TIME_S
, 0);
3367 page_size
= 1 << (hr_dev
->caps
.mtt_buf_pg_sz
+ PAGE_SHIFT
);
3368 context
->rq_cur_blk_addr
= (u32
)(mtts
[hr_qp
->rq
.offset
/ page_size
]
3369 >> PAGE_ADDR_SHIFT
);
3370 qpc_mask
->rq_cur_blk_addr
= 0;
3372 roce_set_field(context
->byte_92_srq_info
,
3373 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M
,
3374 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S
,
3375 mtts
[hr_qp
->rq
.offset
/ page_size
]
3376 >> (32 + PAGE_ADDR_SHIFT
));
3377 roce_set_field(qpc_mask
->byte_92_srq_info
,
3378 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M
,
3379 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S
, 0);
3381 context
->rq_nxt_blk_addr
= (u32
)(mtts
[hr_qp
->rq
.offset
/ page_size
+ 1]
3382 >> PAGE_ADDR_SHIFT
);
3383 qpc_mask
->rq_nxt_blk_addr
= 0;
3385 roce_set_field(context
->byte_104_rq_sge
,
3386 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M
,
3387 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S
,
3388 mtts
[hr_qp
->rq
.offset
/ page_size
+ 1]
3389 >> (32 + PAGE_ADDR_SHIFT
));
3390 roce_set_field(qpc_mask
->byte_104_rq_sge
,
3391 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M
,
3392 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S
, 0);
3394 roce_set_field(context
->byte_108_rx_reqepsn
,
3395 V2_QPC_BYTE_108_RX_REQ_EPSN_M
,
3396 V2_QPC_BYTE_108_RX_REQ_EPSN_S
, attr
->rq_psn
);
3397 roce_set_field(qpc_mask
->byte_108_rx_reqepsn
,
3398 V2_QPC_BYTE_108_RX_REQ_EPSN_M
,
3399 V2_QPC_BYTE_108_RX_REQ_EPSN_S
, 0);
3401 roce_set_field(context
->byte_132_trrl
, V2_QPC_BYTE_132_TRRL_BA_M
,
3402 V2_QPC_BYTE_132_TRRL_BA_S
, dma_handle_3
>> 4);
3403 roce_set_field(qpc_mask
->byte_132_trrl
, V2_QPC_BYTE_132_TRRL_BA_M
,
3404 V2_QPC_BYTE_132_TRRL_BA_S
, 0);
3405 context
->trrl_ba
= (u32
)(dma_handle_3
>> (16 + 4));
3406 qpc_mask
->trrl_ba
= 0;
3407 roce_set_field(context
->byte_140_raq
, V2_QPC_BYTE_140_TRRL_BA_M
,
3408 V2_QPC_BYTE_140_TRRL_BA_S
,
3409 (u32
)(dma_handle_3
>> (32 + 16 + 4)));
3410 roce_set_field(qpc_mask
->byte_140_raq
, V2_QPC_BYTE_140_TRRL_BA_M
,
3411 V2_QPC_BYTE_140_TRRL_BA_S
, 0);
3413 context
->irrl_ba
= (u32
)(dma_handle_2
>> 6);
3414 qpc_mask
->irrl_ba
= 0;
3415 roce_set_field(context
->byte_208_irrl
, V2_QPC_BYTE_208_IRRL_BA_M
,
3416 V2_QPC_BYTE_208_IRRL_BA_S
,
3417 dma_handle_2
>> (32 + 6));
3418 roce_set_field(qpc_mask
->byte_208_irrl
, V2_QPC_BYTE_208_IRRL_BA_M
,
3419 V2_QPC_BYTE_208_IRRL_BA_S
, 0);
3421 roce_set_bit(context
->byte_208_irrl
, V2_QPC_BYTE_208_RMT_E2E_S
, 1);
3422 roce_set_bit(qpc_mask
->byte_208_irrl
, V2_QPC_BYTE_208_RMT_E2E_S
, 0);
3424 roce_set_bit(context
->byte_252_err_txcqn
, V2_QPC_BYTE_252_SIG_TYPE_S
,
3425 hr_qp
->sq_signal_bits
);
3426 roce_set_bit(qpc_mask
->byte_252_err_txcqn
, V2_QPC_BYTE_252_SIG_TYPE_S
,
3429 port
= (attr_mask
& IB_QP_PORT
) ? (attr
->port_num
- 1) : hr_qp
->port
;
3431 smac
= (u8
*)hr_dev
->dev_addr
[port
];
3432 /* when dmac equals smac or loop_idc is 1, it should loopback */
3433 if (ether_addr_equal_unaligned(dmac
, smac
) ||
3434 hr_dev
->loop_idc
== 0x1) {
3435 roce_set_bit(context
->byte_28_at_fl
, V2_QPC_BYTE_28_LBI_S
, 1);
3436 roce_set_bit(qpc_mask
->byte_28_at_fl
, V2_QPC_BYTE_28_LBI_S
, 0);
3439 if ((attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
) &&
3440 attr
->max_dest_rd_atomic
) {
3441 roce_set_field(context
->byte_140_raq
, V2_QPC_BYTE_140_RR_MAX_M
,
3442 V2_QPC_BYTE_140_RR_MAX_S
,
3443 fls(attr
->max_dest_rd_atomic
- 1));
3444 roce_set_field(qpc_mask
->byte_140_raq
, V2_QPC_BYTE_140_RR_MAX_M
,
3445 V2_QPC_BYTE_140_RR_MAX_S
, 0);
3448 if (attr_mask
& IB_QP_DEST_QPN
) {
3449 roce_set_field(context
->byte_56_dqpn_err
, V2_QPC_BYTE_56_DQPN_M
,
3450 V2_QPC_BYTE_56_DQPN_S
, attr
->dest_qp_num
);
3451 roce_set_field(qpc_mask
->byte_56_dqpn_err
,
3452 V2_QPC_BYTE_56_DQPN_M
, V2_QPC_BYTE_56_DQPN_S
, 0);
3455 /* Configure GID index */
3456 port_num
= rdma_ah_get_port_num(&attr
->ah_attr
);
3457 roce_set_field(context
->byte_20_smac_sgid_idx
,
3458 V2_QPC_BYTE_20_SGID_IDX_M
,
3459 V2_QPC_BYTE_20_SGID_IDX_S
,
3460 hns_get_gid_index(hr_dev
, port_num
- 1,
3462 roce_set_field(qpc_mask
->byte_20_smac_sgid_idx
,
3463 V2_QPC_BYTE_20_SGID_IDX_M
,
3464 V2_QPC_BYTE_20_SGID_IDX_S
, 0);
3465 memcpy(&(context
->dmac
), dmac
, 4);
3466 roce_set_field(context
->byte_52_udpspn_dmac
, V2_QPC_BYTE_52_DMAC_M
,
3467 V2_QPC_BYTE_52_DMAC_S
, *((u16
*)(&dmac
[4])));
3469 roce_set_field(qpc_mask
->byte_52_udpspn_dmac
, V2_QPC_BYTE_52_DMAC_M
,
3470 V2_QPC_BYTE_52_DMAC_S
, 0);
3472 roce_set_field(context
->byte_56_dqpn_err
, V2_QPC_BYTE_56_LP_PKTN_INI_M
,
3473 V2_QPC_BYTE_56_LP_PKTN_INI_S
, 4);
3474 roce_set_field(qpc_mask
->byte_56_dqpn_err
, V2_QPC_BYTE_56_LP_PKTN_INI_M
,
3475 V2_QPC_BYTE_56_LP_PKTN_INI_S
, 0);
3477 if (ibqp
->qp_type
== IB_QPT_GSI
|| ibqp
->qp_type
== IB_QPT_UD
)
3478 roce_set_field(context
->byte_24_mtu_tc
, V2_QPC_BYTE_24_MTU_M
,
3479 V2_QPC_BYTE_24_MTU_S
, IB_MTU_4096
);
3480 else if (attr_mask
& IB_QP_PATH_MTU
)
3481 roce_set_field(context
->byte_24_mtu_tc
, V2_QPC_BYTE_24_MTU_M
,
3482 V2_QPC_BYTE_24_MTU_S
, attr
->path_mtu
);
3484 roce_set_field(qpc_mask
->byte_24_mtu_tc
, V2_QPC_BYTE_24_MTU_M
,
3485 V2_QPC_BYTE_24_MTU_S
, 0);
3487 roce_set_field(context
->byte_84_rq_ci_pi
,
3488 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M
,
3489 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S
, hr_qp
->rq
.head
);
3490 roce_set_field(qpc_mask
->byte_84_rq_ci_pi
,
3491 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M
,
3492 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S
, 0);
3494 roce_set_field(qpc_mask
->byte_84_rq_ci_pi
,
3495 V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M
,
3496 V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S
, 0);
3497 roce_set_bit(qpc_mask
->byte_108_rx_reqepsn
,
3498 V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S
, 0);
3499 roce_set_field(qpc_mask
->byte_96_rx_reqmsn
, V2_QPC_BYTE_96_RX_REQ_MSN_M
,
3500 V2_QPC_BYTE_96_RX_REQ_MSN_S
, 0);
3501 roce_set_field(qpc_mask
->byte_108_rx_reqepsn
,
3502 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M
,
3503 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S
, 0);
3505 context
->rq_rnr_timer
= 0;
3506 qpc_mask
->rq_rnr_timer
= 0;
3508 roce_set_field(context
->byte_152_raq
, V2_QPC_BYTE_152_RAQ_PSN_M
,
3509 V2_QPC_BYTE_152_RAQ_PSN_S
, attr
->rq_psn
- 1);
3510 roce_set_field(qpc_mask
->byte_152_raq
, V2_QPC_BYTE_152_RAQ_PSN_M
,
3511 V2_QPC_BYTE_152_RAQ_PSN_S
, 0);
3513 roce_set_field(qpc_mask
->byte_132_trrl
, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M
,
3514 V2_QPC_BYTE_132_TRRL_HEAD_MAX_S
, 0);
3515 roce_set_field(qpc_mask
->byte_132_trrl
, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M
,
3516 V2_QPC_BYTE_132_TRRL_TAIL_MAX_S
, 0);
3518 roce_set_field(context
->byte_168_irrl_idx
,
3519 V2_QPC_BYTE_168_LP_SGEN_INI_M
,
3520 V2_QPC_BYTE_168_LP_SGEN_INI_S
, 3);
3521 roce_set_field(qpc_mask
->byte_168_irrl_idx
,
3522 V2_QPC_BYTE_168_LP_SGEN_INI_M
,
3523 V2_QPC_BYTE_168_LP_SGEN_INI_S
, 0);
3528 static int modify_qp_rtr_to_rts(struct ib_qp
*ibqp
,
3529 const struct ib_qp_attr
*attr
, int attr_mask
,
3530 struct hns_roce_v2_qp_context
*context
,
3531 struct hns_roce_v2_qp_context
*qpc_mask
)
3533 struct hns_roce_dev
*hr_dev
= to_hr_dev(ibqp
->device
);
3534 struct hns_roce_qp
*hr_qp
= to_hr_qp(ibqp
);
3535 struct device
*dev
= hr_dev
->dev
;
3536 dma_addr_t dma_handle
;
3540 /* Search qp buf's mtts */
3541 mtts
= hns_roce_table_find(hr_dev
, &hr_dev
->mr_table
.mtt_table
,
3542 hr_qp
->mtt
.first_seg
, &dma_handle
);
3544 dev_err(dev
, "qp buf pa find failed\n");
3548 /* Not support alternate path and path migration */
3549 if ((attr_mask
& IB_QP_ALT_PATH
) ||
3550 (attr_mask
& IB_QP_PATH_MIG_STATE
)) {
3551 dev_err(dev
, "RTR2RTS attr_mask (0x%x)error\n", attr_mask
);
3556 * In v2 engine, software pass context and context mask to hardware
3557 * when modifying qp. If software need modify some fields in context,
3558 * we should set all bits of the relevant fields in context mask to
3559 * 0 at the same time, else set them to 0x1.
3561 context
->sq_cur_blk_addr
= (u32
)(mtts
[0] >> PAGE_ADDR_SHIFT
);
3562 roce_set_field(context
->byte_168_irrl_idx
,
3563 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M
,
3564 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S
,
3565 mtts
[0] >> (32 + PAGE_ADDR_SHIFT
));
3566 qpc_mask
->sq_cur_blk_addr
= 0;
3567 roce_set_field(qpc_mask
->byte_168_irrl_idx
,
3568 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M
,
3569 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S
, 0);
3571 page_size
= 1 << (hr_dev
->caps
.mtt_buf_pg_sz
+ PAGE_SHIFT
);
3572 context
->sq_cur_sge_blk_addr
=
3573 ((ibqp
->qp_type
== IB_QPT_GSI
) || hr_qp
->sq
.max_gs
> 2) ?
3574 ((u32
)(mtts
[hr_qp
->sge
.offset
/ page_size
]
3575 >> PAGE_ADDR_SHIFT
)) : 0;
3576 roce_set_field(context
->byte_184_irrl_idx
,
3577 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M
,
3578 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S
,
3579 ((ibqp
->qp_type
== IB_QPT_GSI
) || hr_qp
->sq
.max_gs
> 2) ?
3580 (mtts
[hr_qp
->sge
.offset
/ page_size
] >>
3581 (32 + PAGE_ADDR_SHIFT
)) : 0);
3582 qpc_mask
->sq_cur_sge_blk_addr
= 0;
3583 roce_set_field(qpc_mask
->byte_184_irrl_idx
,
3584 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M
,
3585 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S
, 0);
3587 context
->rx_sq_cur_blk_addr
= (u32
)(mtts
[0] >> PAGE_ADDR_SHIFT
);
3588 roce_set_field(context
->byte_232_irrl_sge
,
3589 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M
,
3590 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S
,
3591 mtts
[0] >> (32 + PAGE_ADDR_SHIFT
));
3592 qpc_mask
->rx_sq_cur_blk_addr
= 0;
3593 roce_set_field(qpc_mask
->byte_232_irrl_sge
,
3594 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M
,
3595 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S
, 0);
3598 * Set some fields in context to zero, Because the default values
3599 * of all fields in context are zero, we need not set them to 0 again.
3600 * but we should set the relevant fields of context mask to 0.
3602 roce_set_field(qpc_mask
->byte_232_irrl_sge
,
3603 V2_QPC_BYTE_232_IRRL_SGE_IDX_M
,
3604 V2_QPC_BYTE_232_IRRL_SGE_IDX_S
, 0);
3606 roce_set_field(qpc_mask
->byte_240_irrl_tail
,
3607 V2_QPC_BYTE_240_RX_ACK_MSN_M
,
3608 V2_QPC_BYTE_240_RX_ACK_MSN_S
, 0);
3610 roce_set_field(context
->byte_244_rnr_rxack
,
3611 V2_QPC_BYTE_244_RX_ACK_EPSN_M
,
3612 V2_QPC_BYTE_244_RX_ACK_EPSN_S
, attr
->sq_psn
);
3613 roce_set_field(qpc_mask
->byte_244_rnr_rxack
,
3614 V2_QPC_BYTE_244_RX_ACK_EPSN_M
,
3615 V2_QPC_BYTE_244_RX_ACK_EPSN_S
, 0);
3617 roce_set_field(qpc_mask
->byte_248_ack_psn
,
3618 V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M
,
3619 V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S
, 0);
3620 roce_set_bit(qpc_mask
->byte_248_ack_psn
,
3621 V2_QPC_BYTE_248_IRRL_PSN_VLD_S
, 0);
3622 roce_set_field(qpc_mask
->byte_248_ack_psn
,
3623 V2_QPC_BYTE_248_IRRL_PSN_M
,
3624 V2_QPC_BYTE_248_IRRL_PSN_S
, 0);
3626 roce_set_field(qpc_mask
->byte_240_irrl_tail
,
3627 V2_QPC_BYTE_240_IRRL_TAIL_REAL_M
,
3628 V2_QPC_BYTE_240_IRRL_TAIL_REAL_S
, 0);
3630 roce_set_field(context
->byte_220_retry_psn_msn
,
3631 V2_QPC_BYTE_220_RETRY_MSG_PSN_M
,
3632 V2_QPC_BYTE_220_RETRY_MSG_PSN_S
, attr
->sq_psn
);
3633 roce_set_field(qpc_mask
->byte_220_retry_psn_msn
,
3634 V2_QPC_BYTE_220_RETRY_MSG_PSN_M
,
3635 V2_QPC_BYTE_220_RETRY_MSG_PSN_S
, 0);
3637 roce_set_field(context
->byte_224_retry_msg
,
3638 V2_QPC_BYTE_224_RETRY_MSG_PSN_M
,
3639 V2_QPC_BYTE_224_RETRY_MSG_PSN_S
, attr
->sq_psn
>> 16);
3640 roce_set_field(qpc_mask
->byte_224_retry_msg
,
3641 V2_QPC_BYTE_224_RETRY_MSG_PSN_M
,
3642 V2_QPC_BYTE_224_RETRY_MSG_PSN_S
, 0);
3644 roce_set_field(context
->byte_224_retry_msg
,
3645 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M
,
3646 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S
, attr
->sq_psn
);
3647 roce_set_field(qpc_mask
->byte_224_retry_msg
,
3648 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M
,
3649 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S
, 0);
3651 roce_set_field(qpc_mask
->byte_220_retry_psn_msn
,
3652 V2_QPC_BYTE_220_RETRY_MSG_MSN_M
,
3653 V2_QPC_BYTE_220_RETRY_MSG_MSN_S
, 0);
3655 roce_set_bit(qpc_mask
->byte_248_ack_psn
,
3656 V2_QPC_BYTE_248_RNR_RETRY_FLAG_S
, 0);
3658 roce_set_field(qpc_mask
->byte_212_lsn
, V2_QPC_BYTE_212_CHECK_FLG_M
,
3659 V2_QPC_BYTE_212_CHECK_FLG_S
, 0);
3661 roce_set_field(context
->byte_212_lsn
, V2_QPC_BYTE_212_RETRY_CNT_M
,
3662 V2_QPC_BYTE_212_RETRY_CNT_S
, attr
->retry_cnt
);
3663 roce_set_field(qpc_mask
->byte_212_lsn
, V2_QPC_BYTE_212_RETRY_CNT_M
,
3664 V2_QPC_BYTE_212_RETRY_CNT_S
, 0);
3666 roce_set_field(context
->byte_212_lsn
, V2_QPC_BYTE_212_RETRY_NUM_INIT_M
,
3667 V2_QPC_BYTE_212_RETRY_NUM_INIT_S
, attr
->retry_cnt
);
3668 roce_set_field(qpc_mask
->byte_212_lsn
, V2_QPC_BYTE_212_RETRY_NUM_INIT_M
,
3669 V2_QPC_BYTE_212_RETRY_NUM_INIT_S
, 0);
3671 roce_set_field(context
->byte_244_rnr_rxack
,
3672 V2_QPC_BYTE_244_RNR_NUM_INIT_M
,
3673 V2_QPC_BYTE_244_RNR_NUM_INIT_S
, attr
->rnr_retry
);
3674 roce_set_field(qpc_mask
->byte_244_rnr_rxack
,
3675 V2_QPC_BYTE_244_RNR_NUM_INIT_M
,
3676 V2_QPC_BYTE_244_RNR_NUM_INIT_S
, 0);
3678 roce_set_field(context
->byte_244_rnr_rxack
, V2_QPC_BYTE_244_RNR_CNT_M
,
3679 V2_QPC_BYTE_244_RNR_CNT_S
, attr
->rnr_retry
);
3680 roce_set_field(qpc_mask
->byte_244_rnr_rxack
, V2_QPC_BYTE_244_RNR_CNT_M
,
3681 V2_QPC_BYTE_244_RNR_CNT_S
, 0);
3683 roce_set_field(context
->byte_212_lsn
, V2_QPC_BYTE_212_LSN_M
,
3684 V2_QPC_BYTE_212_LSN_S
, 0x100);
3685 roce_set_field(qpc_mask
->byte_212_lsn
, V2_QPC_BYTE_212_LSN_M
,
3686 V2_QPC_BYTE_212_LSN_S
, 0);
3688 if (attr_mask
& IB_QP_TIMEOUT
) {
3689 roce_set_field(context
->byte_28_at_fl
, V2_QPC_BYTE_28_AT_M
,
3690 V2_QPC_BYTE_28_AT_S
, attr
->timeout
);
3691 roce_set_field(qpc_mask
->byte_28_at_fl
, V2_QPC_BYTE_28_AT_M
,
3692 V2_QPC_BYTE_28_AT_S
, 0);
3695 roce_set_field(context
->byte_172_sq_psn
, V2_QPC_BYTE_172_SQ_CUR_PSN_M
,
3696 V2_QPC_BYTE_172_SQ_CUR_PSN_S
, attr
->sq_psn
);
3697 roce_set_field(qpc_mask
->byte_172_sq_psn
, V2_QPC_BYTE_172_SQ_CUR_PSN_M
,
3698 V2_QPC_BYTE_172_SQ_CUR_PSN_S
, 0);
3700 roce_set_field(qpc_mask
->byte_196_sq_psn
, V2_QPC_BYTE_196_IRRL_HEAD_M
,
3701 V2_QPC_BYTE_196_IRRL_HEAD_S
, 0);
3702 roce_set_field(context
->byte_196_sq_psn
, V2_QPC_BYTE_196_SQ_MAX_PSN_M
,
3703 V2_QPC_BYTE_196_SQ_MAX_PSN_S
, attr
->sq_psn
);
3704 roce_set_field(qpc_mask
->byte_196_sq_psn
, V2_QPC_BYTE_196_SQ_MAX_PSN_M
,
3705 V2_QPC_BYTE_196_SQ_MAX_PSN_S
, 0);
3707 if ((attr_mask
& IB_QP_MAX_QP_RD_ATOMIC
) && attr
->max_rd_atomic
) {
3708 roce_set_field(context
->byte_208_irrl
, V2_QPC_BYTE_208_SR_MAX_M
,
3709 V2_QPC_BYTE_208_SR_MAX_S
,
3710 fls(attr
->max_rd_atomic
- 1));
3711 roce_set_field(qpc_mask
->byte_208_irrl
,
3712 V2_QPC_BYTE_208_SR_MAX_M
,
3713 V2_QPC_BYTE_208_SR_MAX_S
, 0);
3718 static inline bool hns_roce_v2_check_qp_stat(enum ib_qp_state cur_state
,
3719 enum ib_qp_state new_state
)
3722 if ((cur_state
!= IB_QPS_RESET
&&
3723 (new_state
== IB_QPS_ERR
|| new_state
== IB_QPS_RESET
)) ||
3724 ((cur_state
== IB_QPS_RTS
|| cur_state
== IB_QPS_SQD
) &&
3725 (new_state
== IB_QPS_RTS
|| new_state
== IB_QPS_SQD
)) ||
3726 (cur_state
== IB_QPS_SQE
&& new_state
== IB_QPS_RTS
))
3733 static int hns_roce_v2_modify_qp(struct ib_qp
*ibqp
,
3734 const struct ib_qp_attr
*attr
,
3735 int attr_mask
, enum ib_qp_state cur_state
,
3736 enum ib_qp_state new_state
)
3738 struct hns_roce_dev
*hr_dev
= to_hr_dev(ibqp
->device
);
3739 struct hns_roce_qp
*hr_qp
= to_hr_qp(ibqp
);
3740 struct hns_roce_v2_qp_context
*context
;
3741 struct hns_roce_v2_qp_context
*qpc_mask
;
3742 struct device
*dev
= hr_dev
->dev
;
3745 context
= kcalloc(2, sizeof(*context
), GFP_KERNEL
);
3749 qpc_mask
= context
+ 1;
3751 * In v2 engine, software pass context and context mask to hardware
3752 * when modifying qp. If software need modify some fields in context,
3753 * we should set all bits of the relevant fields in context mask to
3754 * 0 at the same time, else set them to 0x1.
3756 memset(qpc_mask
, 0xff, sizeof(*qpc_mask
));
3757 if (cur_state
== IB_QPS_RESET
&& new_state
== IB_QPS_INIT
) {
3758 memset(qpc_mask
, 0, sizeof(*qpc_mask
));
3759 modify_qp_reset_to_init(ibqp
, attr
, attr_mask
, context
,
3761 } else if (cur_state
== IB_QPS_INIT
&& new_state
== IB_QPS_INIT
) {
3762 modify_qp_init_to_init(ibqp
, attr
, attr_mask
, context
,
3764 } else if (cur_state
== IB_QPS_INIT
&& new_state
== IB_QPS_RTR
) {
3765 ret
= modify_qp_init_to_rtr(ibqp
, attr
, attr_mask
, context
,
3769 } else if (cur_state
== IB_QPS_RTR
&& new_state
== IB_QPS_RTS
) {
3770 ret
= modify_qp_rtr_to_rts(ibqp
, attr
, attr_mask
, context
,
3774 } else if (hns_roce_v2_check_qp_stat(cur_state
, new_state
)) {
3778 dev_err(dev
, "Illegal state for QP!\n");
3783 /* When QP state is err, SQ and RQ WQE should be flushed */
3784 if (new_state
== IB_QPS_ERR
) {
3785 roce_set_field(context
->byte_160_sq_ci_pi
,
3786 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M
,
3787 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S
,
3789 roce_set_field(qpc_mask
->byte_160_sq_ci_pi
,
3790 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M
,
3791 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S
, 0);
3792 roce_set_field(context
->byte_84_rq_ci_pi
,
3793 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M
,
3794 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S
,
3796 roce_set_field(qpc_mask
->byte_84_rq_ci_pi
,
3797 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M
,
3798 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S
, 0);
3801 if (attr_mask
& IB_QP_AV
) {
3802 const struct ib_global_route
*grh
=
3803 rdma_ah_read_grh(&attr
->ah_attr
);
3804 const struct ib_gid_attr
*gid_attr
= NULL
;
3805 u8 src_mac
[ETH_ALEN
];
3806 int is_roce_protocol
;
3811 ib_port
= (attr_mask
& IB_QP_PORT
) ? attr
->port_num
:
3813 hr_port
= ib_port
- 1;
3814 is_roce_protocol
= rdma_cap_eth_ah(&hr_dev
->ib_dev
, ib_port
) &&
3815 rdma_ah_get_ah_flags(&attr
->ah_attr
) & IB_AH_GRH
;
3817 if (is_roce_protocol
) {
3818 gid_attr
= attr
->ah_attr
.grh
.sgid_attr
;
3819 vlan
= rdma_vlan_dev_vlan_id(gid_attr
->ndev
);
3820 memcpy(src_mac
, gid_attr
->ndev
->dev_addr
, ETH_ALEN
);
3823 if (is_vlan_dev(gid_attr
->ndev
)) {
3824 roce_set_bit(context
->byte_76_srqn_op_en
,
3825 V2_QPC_BYTE_76_RQ_VLAN_EN_S
, 1);
3826 roce_set_bit(qpc_mask
->byte_76_srqn_op_en
,
3827 V2_QPC_BYTE_76_RQ_VLAN_EN_S
, 0);
3828 roce_set_bit(context
->byte_168_irrl_idx
,
3829 V2_QPC_BYTE_168_SQ_VLAN_EN_S
, 1);
3830 roce_set_bit(qpc_mask
->byte_168_irrl_idx
,
3831 V2_QPC_BYTE_168_SQ_VLAN_EN_S
, 0);
3834 roce_set_field(context
->byte_24_mtu_tc
,
3835 V2_QPC_BYTE_24_VLAN_ID_M
,
3836 V2_QPC_BYTE_24_VLAN_ID_S
, vlan
);
3837 roce_set_field(qpc_mask
->byte_24_mtu_tc
,
3838 V2_QPC_BYTE_24_VLAN_ID_M
,
3839 V2_QPC_BYTE_24_VLAN_ID_S
, 0);
3841 if (grh
->sgid_index
>= hr_dev
->caps
.gid_table_len
[hr_port
]) {
3842 dev_err(hr_dev
->dev
,
3843 "sgid_index(%u) too large. max is %d\n",
3845 hr_dev
->caps
.gid_table_len
[hr_port
]);
3850 if (attr
->ah_attr
.type
!= RDMA_AH_ATTR_TYPE_ROCE
) {
3851 dev_err(hr_dev
->dev
, "ah attr is not RDMA roce type\n");
3856 roce_set_field(context
->byte_52_udpspn_dmac
,
3857 V2_QPC_BYTE_52_UDPSPN_M
, V2_QPC_BYTE_52_UDPSPN_S
,
3858 (gid_attr
->gid_type
!= IB_GID_TYPE_ROCE_UDP_ENCAP
) ?
3861 roce_set_field(qpc_mask
->byte_52_udpspn_dmac
,
3862 V2_QPC_BYTE_52_UDPSPN_M
,
3863 V2_QPC_BYTE_52_UDPSPN_S
, 0);
3865 roce_set_field(context
->byte_20_smac_sgid_idx
,
3866 V2_QPC_BYTE_20_SGID_IDX_M
,
3867 V2_QPC_BYTE_20_SGID_IDX_S
, grh
->sgid_index
);
3869 roce_set_field(qpc_mask
->byte_20_smac_sgid_idx
,
3870 V2_QPC_BYTE_20_SGID_IDX_M
,
3871 V2_QPC_BYTE_20_SGID_IDX_S
, 0);
3873 roce_set_field(context
->byte_24_mtu_tc
,
3874 V2_QPC_BYTE_24_HOP_LIMIT_M
,
3875 V2_QPC_BYTE_24_HOP_LIMIT_S
, grh
->hop_limit
);
3876 roce_set_field(qpc_mask
->byte_24_mtu_tc
,
3877 V2_QPC_BYTE_24_HOP_LIMIT_M
,
3878 V2_QPC_BYTE_24_HOP_LIMIT_S
, 0);
3880 if (hr_dev
->pci_dev
->revision
== 0x21 &&
3881 gid_attr
->gid_type
== IB_GID_TYPE_ROCE_UDP_ENCAP
)
3882 roce_set_field(context
->byte_24_mtu_tc
,
3883 V2_QPC_BYTE_24_TC_M
, V2_QPC_BYTE_24_TC_S
,
3884 grh
->traffic_class
>> 2);
3886 roce_set_field(context
->byte_24_mtu_tc
,
3887 V2_QPC_BYTE_24_TC_M
, V2_QPC_BYTE_24_TC_S
,
3888 grh
->traffic_class
);
3889 roce_set_field(qpc_mask
->byte_24_mtu_tc
, V2_QPC_BYTE_24_TC_M
,
3890 V2_QPC_BYTE_24_TC_S
, 0);
3891 roce_set_field(context
->byte_28_at_fl
, V2_QPC_BYTE_28_FL_M
,
3892 V2_QPC_BYTE_28_FL_S
, grh
->flow_label
);
3893 roce_set_field(qpc_mask
->byte_28_at_fl
, V2_QPC_BYTE_28_FL_M
,
3894 V2_QPC_BYTE_28_FL_S
, 0);
3895 memcpy(context
->dgid
, grh
->dgid
.raw
, sizeof(grh
->dgid
.raw
));
3896 memset(qpc_mask
->dgid
, 0, sizeof(grh
->dgid
.raw
));
3897 roce_set_field(context
->byte_28_at_fl
, V2_QPC_BYTE_28_SL_M
,
3898 V2_QPC_BYTE_28_SL_S
,
3899 rdma_ah_get_sl(&attr
->ah_attr
));
3900 roce_set_field(qpc_mask
->byte_28_at_fl
, V2_QPC_BYTE_28_SL_M
,
3901 V2_QPC_BYTE_28_SL_S
, 0);
3902 hr_qp
->sl
= rdma_ah_get_sl(&attr
->ah_attr
);
3905 if (attr_mask
& (IB_QP_ACCESS_FLAGS
| IB_QP_MAX_DEST_RD_ATOMIC
))
3906 set_access_flags(hr_qp
, context
, qpc_mask
, attr
, attr_mask
);
3908 roce_set_bit(context
->byte_108_rx_reqepsn
, V2_QPC_BYTE_108_INV_CREDIT_S
,
3910 roce_set_bit(qpc_mask
->byte_108_rx_reqepsn
,
3911 V2_QPC_BYTE_108_INV_CREDIT_S
, 0);
3913 /* Every status migrate must change state */
3914 roce_set_field(context
->byte_60_qpst_tempid
, V2_QPC_BYTE_60_QP_ST_M
,
3915 V2_QPC_BYTE_60_QP_ST_S
, new_state
);
3916 roce_set_field(qpc_mask
->byte_60_qpst_tempid
, V2_QPC_BYTE_60_QP_ST_M
,
3917 V2_QPC_BYTE_60_QP_ST_S
, 0);
3919 /* SW pass context to HW */
3920 ret
= hns_roce_v2_qp_modify(hr_dev
, &hr_qp
->mtt
, cur_state
, new_state
,
3923 dev_err(dev
, "hns_roce_qp_modify failed(%d)\n", ret
);
3927 hr_qp
->state
= new_state
;
3929 if (attr_mask
& IB_QP_ACCESS_FLAGS
)
3930 hr_qp
->atomic_rd_en
= attr
->qp_access_flags
;
3932 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
)
3933 hr_qp
->resp_depth
= attr
->max_dest_rd_atomic
;
3934 if (attr_mask
& IB_QP_PORT
) {
3935 hr_qp
->port
= attr
->port_num
- 1;
3936 hr_qp
->phy_port
= hr_dev
->iboe
.phy_port
[hr_qp
->port
];
3939 if (new_state
== IB_QPS_RESET
&& !ibqp
->uobject
) {
3940 hns_roce_v2_cq_clean(to_hr_cq(ibqp
->recv_cq
), hr_qp
->qpn
,
3941 ibqp
->srq
? to_hr_srq(ibqp
->srq
) : NULL
);
3942 if (ibqp
->send_cq
!= ibqp
->recv_cq
)
3943 hns_roce_v2_cq_clean(to_hr_cq(ibqp
->send_cq
),
3950 hr_qp
->sq_next_wqe
= 0;
3951 hr_qp
->next_sge
= 0;
3952 if (hr_qp
->rq
.wqe_cnt
)
3953 *hr_qp
->rdb
.db_record
= 0;
3961 static inline enum ib_qp_state
to_ib_qp_st(enum hns_roce_v2_qp_state state
)
3964 case HNS_ROCE_QP_ST_RST
: return IB_QPS_RESET
;
3965 case HNS_ROCE_QP_ST_INIT
: return IB_QPS_INIT
;
3966 case HNS_ROCE_QP_ST_RTR
: return IB_QPS_RTR
;
3967 case HNS_ROCE_QP_ST_RTS
: return IB_QPS_RTS
;
3968 case HNS_ROCE_QP_ST_SQ_DRAINING
:
3969 case HNS_ROCE_QP_ST_SQD
: return IB_QPS_SQD
;
3970 case HNS_ROCE_QP_ST_SQER
: return IB_QPS_SQE
;
3971 case HNS_ROCE_QP_ST_ERR
: return IB_QPS_ERR
;
3976 static int hns_roce_v2_query_qpc(struct hns_roce_dev
*hr_dev
,
3977 struct hns_roce_qp
*hr_qp
,
3978 struct hns_roce_v2_qp_context
*hr_context
)
3980 struct hns_roce_cmd_mailbox
*mailbox
;
3983 mailbox
= hns_roce_alloc_cmd_mailbox(hr_dev
);
3984 if (IS_ERR(mailbox
))
3985 return PTR_ERR(mailbox
);
3987 ret
= hns_roce_cmd_mbox(hr_dev
, 0, mailbox
->dma
, hr_qp
->qpn
, 0,
3988 HNS_ROCE_CMD_QUERY_QPC
,
3989 HNS_ROCE_CMD_TIMEOUT_MSECS
);
3991 dev_err(hr_dev
->dev
, "QUERY QP cmd process error\n");
3995 memcpy(hr_context
, mailbox
->buf
, sizeof(*hr_context
));
3998 hns_roce_free_cmd_mailbox(hr_dev
, mailbox
);
4002 static int hns_roce_v2_query_qp(struct ib_qp
*ibqp
, struct ib_qp_attr
*qp_attr
,
4004 struct ib_qp_init_attr
*qp_init_attr
)
4006 struct hns_roce_dev
*hr_dev
= to_hr_dev(ibqp
->device
);
4007 struct hns_roce_qp
*hr_qp
= to_hr_qp(ibqp
);
4008 struct hns_roce_v2_qp_context
*context
;
4009 struct device
*dev
= hr_dev
->dev
;
4014 context
= kzalloc(sizeof(*context
), GFP_KERNEL
);
4018 memset(qp_attr
, 0, sizeof(*qp_attr
));
4019 memset(qp_init_attr
, 0, sizeof(*qp_init_attr
));
4021 mutex_lock(&hr_qp
->mutex
);
4023 if (hr_qp
->state
== IB_QPS_RESET
) {
4024 qp_attr
->qp_state
= IB_QPS_RESET
;
4029 ret
= hns_roce_v2_query_qpc(hr_dev
, hr_qp
, context
);
4031 dev_err(dev
, "query qpc error\n");
4036 state
= roce_get_field(context
->byte_60_qpst_tempid
,
4037 V2_QPC_BYTE_60_QP_ST_M
, V2_QPC_BYTE_60_QP_ST_S
);
4038 tmp_qp_state
= to_ib_qp_st((enum hns_roce_v2_qp_state
)state
);
4039 if (tmp_qp_state
== -1) {
4040 dev_err(dev
, "Illegal ib_qp_state\n");
4044 hr_qp
->state
= (u8
)tmp_qp_state
;
4045 qp_attr
->qp_state
= (enum ib_qp_state
)hr_qp
->state
;
4046 qp_attr
->path_mtu
= (enum ib_mtu
)roce_get_field(context
->byte_24_mtu_tc
,
4047 V2_QPC_BYTE_24_MTU_M
,
4048 V2_QPC_BYTE_24_MTU_S
);
4049 qp_attr
->path_mig_state
= IB_MIG_ARMED
;
4050 qp_attr
->ah_attr
.type
= RDMA_AH_ATTR_TYPE_ROCE
;
4051 if (hr_qp
->ibqp
.qp_type
== IB_QPT_UD
)
4052 qp_attr
->qkey
= V2_QKEY_VAL
;
4054 qp_attr
->rq_psn
= roce_get_field(context
->byte_108_rx_reqepsn
,
4055 V2_QPC_BYTE_108_RX_REQ_EPSN_M
,
4056 V2_QPC_BYTE_108_RX_REQ_EPSN_S
);
4057 qp_attr
->sq_psn
= (u32
)roce_get_field(context
->byte_172_sq_psn
,
4058 V2_QPC_BYTE_172_SQ_CUR_PSN_M
,
4059 V2_QPC_BYTE_172_SQ_CUR_PSN_S
);
4060 qp_attr
->dest_qp_num
= (u8
)roce_get_field(context
->byte_56_dqpn_err
,
4061 V2_QPC_BYTE_56_DQPN_M
,
4062 V2_QPC_BYTE_56_DQPN_S
);
4063 qp_attr
->qp_access_flags
= ((roce_get_bit(context
->byte_76_srqn_op_en
,
4064 V2_QPC_BYTE_76_RRE_S
)) << 2) |
4065 ((roce_get_bit(context
->byte_76_srqn_op_en
,
4066 V2_QPC_BYTE_76_RWE_S
)) << 1) |
4067 ((roce_get_bit(context
->byte_76_srqn_op_en
,
4068 V2_QPC_BYTE_76_ATE_S
)) << 3);
4069 if (hr_qp
->ibqp
.qp_type
== IB_QPT_RC
||
4070 hr_qp
->ibqp
.qp_type
== IB_QPT_UC
) {
4071 struct ib_global_route
*grh
=
4072 rdma_ah_retrieve_grh(&qp_attr
->ah_attr
);
4074 rdma_ah_set_sl(&qp_attr
->ah_attr
,
4075 roce_get_field(context
->byte_28_at_fl
,
4076 V2_QPC_BYTE_28_SL_M
,
4077 V2_QPC_BYTE_28_SL_S
));
4078 grh
->flow_label
= roce_get_field(context
->byte_28_at_fl
,
4079 V2_QPC_BYTE_28_FL_M
,
4080 V2_QPC_BYTE_28_FL_S
);
4081 grh
->sgid_index
= roce_get_field(context
->byte_20_smac_sgid_idx
,
4082 V2_QPC_BYTE_20_SGID_IDX_M
,
4083 V2_QPC_BYTE_20_SGID_IDX_S
);
4084 grh
->hop_limit
= roce_get_field(context
->byte_24_mtu_tc
,
4085 V2_QPC_BYTE_24_HOP_LIMIT_M
,
4086 V2_QPC_BYTE_24_HOP_LIMIT_S
);
4087 grh
->traffic_class
= roce_get_field(context
->byte_24_mtu_tc
,
4088 V2_QPC_BYTE_24_TC_M
,
4089 V2_QPC_BYTE_24_TC_S
);
4091 memcpy(grh
->dgid
.raw
, context
->dgid
, sizeof(grh
->dgid
.raw
));
4094 qp_attr
->port_num
= hr_qp
->port
+ 1;
4095 qp_attr
->sq_draining
= 0;
4096 qp_attr
->max_rd_atomic
= 1 << roce_get_field(context
->byte_208_irrl
,
4097 V2_QPC_BYTE_208_SR_MAX_M
,
4098 V2_QPC_BYTE_208_SR_MAX_S
);
4099 qp_attr
->max_dest_rd_atomic
= 1 << roce_get_field(context
->byte_140_raq
,
4100 V2_QPC_BYTE_140_RR_MAX_M
,
4101 V2_QPC_BYTE_140_RR_MAX_S
);
4102 qp_attr
->min_rnr_timer
= (u8
)roce_get_field(context
->byte_80_rnr_rx_cqn
,
4103 V2_QPC_BYTE_80_MIN_RNR_TIME_M
,
4104 V2_QPC_BYTE_80_MIN_RNR_TIME_S
);
4105 qp_attr
->timeout
= (u8
)roce_get_field(context
->byte_28_at_fl
,
4106 V2_QPC_BYTE_28_AT_M
,
4107 V2_QPC_BYTE_28_AT_S
);
4108 qp_attr
->retry_cnt
= roce_get_field(context
->byte_212_lsn
,
4109 V2_QPC_BYTE_212_RETRY_CNT_M
,
4110 V2_QPC_BYTE_212_RETRY_CNT_S
);
4111 qp_attr
->rnr_retry
= context
->rq_rnr_timer
;
4114 qp_attr
->cur_qp_state
= qp_attr
->qp_state
;
4115 qp_attr
->cap
.max_recv_wr
= hr_qp
->rq
.wqe_cnt
;
4116 qp_attr
->cap
.max_recv_sge
= hr_qp
->rq
.max_gs
;
4118 if (!ibqp
->uobject
) {
4119 qp_attr
->cap
.max_send_wr
= hr_qp
->sq
.wqe_cnt
;
4120 qp_attr
->cap
.max_send_sge
= hr_qp
->sq
.max_gs
;
4122 qp_attr
->cap
.max_send_wr
= 0;
4123 qp_attr
->cap
.max_send_sge
= 0;
4126 qp_init_attr
->cap
= qp_attr
->cap
;
4129 mutex_unlock(&hr_qp
->mutex
);
4134 static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev
*hr_dev
,
4135 struct hns_roce_qp
*hr_qp
,
4138 struct hns_roce_cq
*send_cq
, *recv_cq
;
4139 struct device
*dev
= hr_dev
->dev
;
4142 if (hr_qp
->ibqp
.qp_type
== IB_QPT_RC
&& hr_qp
->state
!= IB_QPS_RESET
) {
4143 /* Modify qp to reset before destroying qp */
4144 ret
= hns_roce_v2_modify_qp(&hr_qp
->ibqp
, NULL
, 0,
4145 hr_qp
->state
, IB_QPS_RESET
);
4147 dev_err(dev
, "modify QP %06lx to ERR failed.\n",
4153 send_cq
= to_hr_cq(hr_qp
->ibqp
.send_cq
);
4154 recv_cq
= to_hr_cq(hr_qp
->ibqp
.recv_cq
);
4156 hns_roce_lock_cqs(send_cq
, recv_cq
);
4159 __hns_roce_v2_cq_clean(recv_cq
, hr_qp
->qpn
, hr_qp
->ibqp
.srq
?
4160 to_hr_srq(hr_qp
->ibqp
.srq
) : NULL
);
4161 if (send_cq
!= recv_cq
)
4162 __hns_roce_v2_cq_clean(send_cq
, hr_qp
->qpn
, NULL
);
4165 hns_roce_qp_remove(hr_dev
, hr_qp
);
4167 hns_roce_unlock_cqs(send_cq
, recv_cq
);
4169 hns_roce_qp_free(hr_dev
, hr_qp
);
4171 /* Not special_QP, free their QPN */
4172 if ((hr_qp
->ibqp
.qp_type
== IB_QPT_RC
) ||
4173 (hr_qp
->ibqp
.qp_type
== IB_QPT_UC
) ||
4174 (hr_qp
->ibqp
.qp_type
== IB_QPT_UD
))
4175 hns_roce_release_range_qp(hr_dev
, hr_qp
->qpn
, 1);
4177 hns_roce_mtt_cleanup(hr_dev
, &hr_qp
->mtt
);
4180 if (hr_qp
->sq
.wqe_cnt
&& (hr_qp
->sdb_en
== 1))
4181 hns_roce_db_unmap_user(
4182 to_hr_ucontext(hr_qp
->ibqp
.uobject
->context
),
4185 if (hr_qp
->rq
.wqe_cnt
&& (hr_qp
->rdb_en
== 1))
4186 hns_roce_db_unmap_user(
4187 to_hr_ucontext(hr_qp
->ibqp
.uobject
->context
),
4189 ib_umem_release(hr_qp
->umem
);
4191 kfree(hr_qp
->sq
.wrid
);
4192 kfree(hr_qp
->rq
.wrid
);
4193 hns_roce_buf_free(hr_dev
, hr_qp
->buff_size
, &hr_qp
->hr_buf
);
4194 if (hr_qp
->rq
.wqe_cnt
)
4195 hns_roce_free_db(hr_dev
, &hr_qp
->rdb
);
4198 if ((hr_dev
->caps
.flags
& HNS_ROCE_CAP_FLAG_RQ_INLINE
) &&
4199 hr_qp
->rq
.wqe_cnt
) {
4200 kfree(hr_qp
->rq_inl_buf
.wqe_list
[0].sg_list
);
4201 kfree(hr_qp
->rq_inl_buf
.wqe_list
);
4207 static int hns_roce_v2_destroy_qp(struct ib_qp
*ibqp
)
4209 struct hns_roce_dev
*hr_dev
= to_hr_dev(ibqp
->device
);
4210 struct hns_roce_qp
*hr_qp
= to_hr_qp(ibqp
);
4213 ret
= hns_roce_v2_destroy_qp_common(hr_dev
, hr_qp
, ibqp
->uobject
);
4215 dev_err(hr_dev
->dev
, "Destroy qp failed(%d)\n", ret
);
4219 if (hr_qp
->ibqp
.qp_type
== IB_QPT_GSI
)
4220 kfree(hr_to_hr_sqp(hr_qp
));
4227 static int hns_roce_v2_modify_cq(struct ib_cq
*cq
, u16 cq_count
, u16 cq_period
)
4229 struct hns_roce_dev
*hr_dev
= to_hr_dev(cq
->device
);
4230 struct hns_roce_v2_cq_context
*cq_context
;
4231 struct hns_roce_cq
*hr_cq
= to_hr_cq(cq
);
4232 struct hns_roce_v2_cq_context
*cqc_mask
;
4233 struct hns_roce_cmd_mailbox
*mailbox
;
4236 mailbox
= hns_roce_alloc_cmd_mailbox(hr_dev
);
4237 if (IS_ERR(mailbox
))
4238 return PTR_ERR(mailbox
);
4240 cq_context
= mailbox
->buf
;
4241 cqc_mask
= (struct hns_roce_v2_cq_context
*)mailbox
->buf
+ 1;
4243 memset(cqc_mask
, 0xff, sizeof(*cqc_mask
));
4245 roce_set_field(cq_context
->byte_56_cqe_period_maxcnt
,
4246 V2_CQC_BYTE_56_CQ_MAX_CNT_M
, V2_CQC_BYTE_56_CQ_MAX_CNT_S
,
4248 roce_set_field(cqc_mask
->byte_56_cqe_period_maxcnt
,
4249 V2_CQC_BYTE_56_CQ_MAX_CNT_M
, V2_CQC_BYTE_56_CQ_MAX_CNT_S
,
4251 roce_set_field(cq_context
->byte_56_cqe_period_maxcnt
,
4252 V2_CQC_BYTE_56_CQ_PERIOD_M
, V2_CQC_BYTE_56_CQ_PERIOD_S
,
4254 roce_set_field(cqc_mask
->byte_56_cqe_period_maxcnt
,
4255 V2_CQC_BYTE_56_CQ_PERIOD_M
, V2_CQC_BYTE_56_CQ_PERIOD_S
,
4258 ret
= hns_roce_cmd_mbox(hr_dev
, mailbox
->dma
, 0, hr_cq
->cqn
, 1,
4259 HNS_ROCE_CMD_MODIFY_CQC
,
4260 HNS_ROCE_CMD_TIMEOUT_MSECS
);
4261 hns_roce_free_cmd_mailbox(hr_dev
, mailbox
);
4263 dev_err(hr_dev
->dev
, "MODIFY CQ Failed to cmd mailbox.\n");
4268 static void hns_roce_set_qps_to_err(struct hns_roce_dev
*hr_dev
, u32 qpn
)
4270 struct hns_roce_qp
*hr_qp
;
4271 struct ib_qp_attr attr
;
4275 hr_qp
= __hns_roce_qp_lookup(hr_dev
, qpn
);
4277 dev_warn(hr_dev
->dev
, "no hr_qp can be found!\n");
4281 if (hr_qp
->ibqp
.uobject
) {
4282 if (hr_qp
->sdb_en
== 1) {
4283 hr_qp
->sq
.head
= *(int *)(hr_qp
->sdb
.virt_addr
);
4284 hr_qp
->rq
.head
= *(int *)(hr_qp
->rdb
.virt_addr
);
4286 dev_warn(hr_dev
->dev
, "flush cqe is unsupported in userspace!\n");
4291 attr_mask
= IB_QP_STATE
;
4292 attr
.qp_state
= IB_QPS_ERR
;
4293 ret
= hns_roce_v2_modify_qp(&hr_qp
->ibqp
, &attr
, attr_mask
,
4294 hr_qp
->state
, IB_QPS_ERR
);
4296 dev_err(hr_dev
->dev
, "failed to modify qp %d to err state.\n",
4300 static void hns_roce_irq_work_handle(struct work_struct
*work
)
4302 struct hns_roce_work
*irq_work
=
4303 container_of(work
, struct hns_roce_work
, work
);
4304 struct device
*dev
= irq_work
->hr_dev
->dev
;
4305 u32 qpn
= irq_work
->qpn
;
4306 u32 cqn
= irq_work
->cqn
;
4308 switch (irq_work
->event_type
) {
4309 case HNS_ROCE_EVENT_TYPE_PATH_MIG
:
4310 dev_info(dev
, "Path migrated succeeded.\n");
4312 case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED
:
4313 dev_warn(dev
, "Path migration failed.\n");
4315 case HNS_ROCE_EVENT_TYPE_COMM_EST
:
4316 dev_info(dev
, "Communication established.\n");
4318 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED
:
4319 dev_warn(dev
, "Send queue drained.\n");
4321 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR
:
4322 dev_err(dev
, "Local work queue catastrophic error.\n");
4323 hns_roce_set_qps_to_err(irq_work
->hr_dev
, qpn
);
4324 switch (irq_work
->sub_type
) {
4325 case HNS_ROCE_LWQCE_QPC_ERROR
:
4326 dev_err(dev
, "QP %d, QPC error.\n", qpn
);
4328 case HNS_ROCE_LWQCE_MTU_ERROR
:
4329 dev_err(dev
, "QP %d, MTU error.\n", qpn
);
4331 case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR
:
4332 dev_err(dev
, "QP %d, WQE BA addr error.\n", qpn
);
4334 case HNS_ROCE_LWQCE_WQE_ADDR_ERROR
:
4335 dev_err(dev
, "QP %d, WQE addr error.\n", qpn
);
4337 case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR
:
4338 dev_err(dev
, "QP %d, WQE shift error.\n", qpn
);
4341 dev_err(dev
, "Unhandled sub_event type %d.\n",
4342 irq_work
->sub_type
);
4346 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR
:
4347 dev_err(dev
, "Invalid request local work queue error.\n");
4348 hns_roce_set_qps_to_err(irq_work
->hr_dev
, qpn
);
4350 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR
:
4351 dev_err(dev
, "Local access violation work queue error.\n");
4352 hns_roce_set_qps_to_err(irq_work
->hr_dev
, qpn
);
4353 switch (irq_work
->sub_type
) {
4354 case HNS_ROCE_LAVWQE_R_KEY_VIOLATION
:
4355 dev_err(dev
, "QP %d, R_key violation.\n", qpn
);
4357 case HNS_ROCE_LAVWQE_LENGTH_ERROR
:
4358 dev_err(dev
, "QP %d, length error.\n", qpn
);
4360 case HNS_ROCE_LAVWQE_VA_ERROR
:
4361 dev_err(dev
, "QP %d, VA error.\n", qpn
);
4363 case HNS_ROCE_LAVWQE_PD_ERROR
:
4364 dev_err(dev
, "QP %d, PD error.\n", qpn
);
4366 case HNS_ROCE_LAVWQE_RW_ACC_ERROR
:
4367 dev_err(dev
, "QP %d, rw acc error.\n", qpn
);
4369 case HNS_ROCE_LAVWQE_KEY_STATE_ERROR
:
4370 dev_err(dev
, "QP %d, key state error.\n", qpn
);
4372 case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR
:
4373 dev_err(dev
, "QP %d, MR operation error.\n", qpn
);
4376 dev_err(dev
, "Unhandled sub_event type %d.\n",
4377 irq_work
->sub_type
);
4381 case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH
:
4382 dev_warn(dev
, "SRQ limit reach.\n");
4384 case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH
:
4385 dev_warn(dev
, "SRQ last wqe reach.\n");
4387 case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR
:
4388 dev_err(dev
, "SRQ catas error.\n");
4390 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR
:
4391 dev_err(dev
, "CQ 0x%x access err.\n", cqn
);
4393 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW
:
4394 dev_warn(dev
, "CQ 0x%x overflow\n", cqn
);
4396 case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW
:
4397 dev_warn(dev
, "DB overflow.\n");
4399 case HNS_ROCE_EVENT_TYPE_FLR
:
4400 dev_warn(dev
, "Function level reset.\n");
4409 static void hns_roce_v2_init_irq_work(struct hns_roce_dev
*hr_dev
,
4410 struct hns_roce_eq
*eq
,
4413 struct hns_roce_work
*irq_work
;
4415 irq_work
= kzalloc(sizeof(struct hns_roce_work
), GFP_ATOMIC
);
4419 INIT_WORK(&(irq_work
->work
), hns_roce_irq_work_handle
);
4420 irq_work
->hr_dev
= hr_dev
;
4421 irq_work
->qpn
= qpn
;
4422 irq_work
->cqn
= cqn
;
4423 irq_work
->event_type
= eq
->event_type
;
4424 irq_work
->sub_type
= eq
->sub_type
;
4425 queue_work(hr_dev
->irq_workq
, &(irq_work
->work
));
4428 static void set_eq_cons_index_v2(struct hns_roce_eq
*eq
)
4435 if (eq
->type_flag
== HNS_ROCE_AEQ
) {
4436 roce_set_field(doorbell
[0], HNS_ROCE_V2_EQ_DB_CMD_M
,
4437 HNS_ROCE_V2_EQ_DB_CMD_S
,
4438 eq
->arm_st
== HNS_ROCE_V2_EQ_ALWAYS_ARMED
?
4439 HNS_ROCE_EQ_DB_CMD_AEQ
:
4440 HNS_ROCE_EQ_DB_CMD_AEQ_ARMED
);
4442 roce_set_field(doorbell
[0], HNS_ROCE_V2_EQ_DB_TAG_M
,
4443 HNS_ROCE_V2_EQ_DB_TAG_S
, eq
->eqn
);
4445 roce_set_field(doorbell
[0], HNS_ROCE_V2_EQ_DB_CMD_M
,
4446 HNS_ROCE_V2_EQ_DB_CMD_S
,
4447 eq
->arm_st
== HNS_ROCE_V2_EQ_ALWAYS_ARMED
?
4448 HNS_ROCE_EQ_DB_CMD_CEQ
:
4449 HNS_ROCE_EQ_DB_CMD_CEQ_ARMED
);
4452 roce_set_field(doorbell
[1], HNS_ROCE_V2_EQ_DB_PARA_M
,
4453 HNS_ROCE_V2_EQ_DB_PARA_S
,
4454 (eq
->cons_index
& HNS_ROCE_V2_CONS_IDX_M
));
4456 hns_roce_write64_k(doorbell
, eq
->doorbell
);
4459 static struct hns_roce_aeqe
*get_aeqe_v2(struct hns_roce_eq
*eq
, u32 entry
)
4464 buf_chk_sz
= 1 << (eq
->eqe_buf_pg_sz
+ PAGE_SHIFT
);
4465 off
= (entry
& (eq
->entries
- 1)) * HNS_ROCE_AEQ_ENTRY_SIZE
;
4467 return (struct hns_roce_aeqe
*)((char *)(eq
->buf_list
->buf
) +
4471 static struct hns_roce_aeqe
*mhop_get_aeqe(struct hns_roce_eq
*eq
, u32 entry
)
4476 buf_chk_sz
= 1 << (eq
->eqe_buf_pg_sz
+ PAGE_SHIFT
);
4478 off
= (entry
& (eq
->entries
- 1)) * HNS_ROCE_AEQ_ENTRY_SIZE
;
4480 if (eq
->hop_num
== HNS_ROCE_HOP_NUM_0
)
4481 return (struct hns_roce_aeqe
*)((u8
*)(eq
->bt_l0
) +
4484 return (struct hns_roce_aeqe
*)((u8
*)
4485 (eq
->buf
[off
/ buf_chk_sz
]) + off
% buf_chk_sz
);
4488 static struct hns_roce_aeqe
*next_aeqe_sw_v2(struct hns_roce_eq
*eq
)
4490 struct hns_roce_aeqe
*aeqe
;
4493 aeqe
= get_aeqe_v2(eq
, eq
->cons_index
);
4495 aeqe
= mhop_get_aeqe(eq
, eq
->cons_index
);
4497 return (roce_get_bit(aeqe
->asyn
, HNS_ROCE_V2_AEQ_AEQE_OWNER_S
) ^
4498 !!(eq
->cons_index
& eq
->entries
)) ? aeqe
: NULL
;
4501 static int hns_roce_v2_aeq_int(struct hns_roce_dev
*hr_dev
,
4502 struct hns_roce_eq
*eq
)
4504 struct device
*dev
= hr_dev
->dev
;
4505 struct hns_roce_aeqe
*aeqe
;
4513 while ((aeqe
= next_aeqe_sw_v2(eq
))) {
4515 /* Make sure we read AEQ entry after we have checked the
4520 event_type
= roce_get_field(aeqe
->asyn
,
4521 HNS_ROCE_V2_AEQE_EVENT_TYPE_M
,
4522 HNS_ROCE_V2_AEQE_EVENT_TYPE_S
);
4523 sub_type
= roce_get_field(aeqe
->asyn
,
4524 HNS_ROCE_V2_AEQE_SUB_TYPE_M
,
4525 HNS_ROCE_V2_AEQE_SUB_TYPE_S
);
4526 qpn
= roce_get_field(aeqe
->event
.qp_event
.qp
,
4527 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M
,
4528 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S
);
4529 cqn
= roce_get_field(aeqe
->event
.cq_event
.cq
,
4530 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M
,
4531 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S
);
4532 srqn
= roce_get_field(aeqe
->event
.srq_event
.srq
,
4533 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M
,
4534 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S
);
4536 switch (event_type
) {
4537 case HNS_ROCE_EVENT_TYPE_PATH_MIG
:
4538 case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED
:
4539 case HNS_ROCE_EVENT_TYPE_COMM_EST
:
4540 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED
:
4541 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR
:
4542 case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH
:
4543 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR
:
4544 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR
:
4545 hns_roce_qp_event(hr_dev
, qpn
, event_type
);
4547 case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH
:
4548 case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR
:
4549 hns_roce_srq_event(hr_dev
, srqn
, event_type
);
4551 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR
:
4552 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW
:
4553 hns_roce_cq_event(hr_dev
, cqn
, event_type
);
4555 case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW
:
4557 case HNS_ROCE_EVENT_TYPE_MB
:
4558 hns_roce_cmd_event(hr_dev
,
4559 le16_to_cpu(aeqe
->event
.cmd
.token
),
4560 aeqe
->event
.cmd
.status
,
4561 le64_to_cpu(aeqe
->event
.cmd
.out_param
));
4563 case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW
:
4565 case HNS_ROCE_EVENT_TYPE_FLR
:
4568 dev_err(dev
, "Unhandled event %d on EQ %d at idx %u.\n",
4569 event_type
, eq
->eqn
, eq
->cons_index
);
4573 eq
->event_type
= event_type
;
4574 eq
->sub_type
= sub_type
;
4578 if (eq
->cons_index
> (2 * eq
->entries
- 1)) {
4579 dev_warn(dev
, "cons_index overflow, set back to 0.\n");
4582 hns_roce_v2_init_irq_work(hr_dev
, eq
, qpn
, cqn
);
4585 set_eq_cons_index_v2(eq
);
4589 static struct hns_roce_ceqe
*get_ceqe_v2(struct hns_roce_eq
*eq
, u32 entry
)
4594 buf_chk_sz
= 1 << (eq
->eqe_buf_pg_sz
+ PAGE_SHIFT
);
4595 off
= (entry
& (eq
->entries
- 1)) * HNS_ROCE_CEQ_ENTRY_SIZE
;
4597 return (struct hns_roce_ceqe
*)((char *)(eq
->buf_list
->buf
) +
4601 static struct hns_roce_ceqe
*mhop_get_ceqe(struct hns_roce_eq
*eq
, u32 entry
)
4606 buf_chk_sz
= 1 << (eq
->eqe_buf_pg_sz
+ PAGE_SHIFT
);
4608 off
= (entry
& (eq
->entries
- 1)) * HNS_ROCE_CEQ_ENTRY_SIZE
;
4610 if (eq
->hop_num
== HNS_ROCE_HOP_NUM_0
)
4611 return (struct hns_roce_ceqe
*)((u8
*)(eq
->bt_l0
) +
4614 return (struct hns_roce_ceqe
*)((u8
*)(eq
->buf
[off
/
4615 buf_chk_sz
]) + off
% buf_chk_sz
);
4618 static struct hns_roce_ceqe
*next_ceqe_sw_v2(struct hns_roce_eq
*eq
)
4620 struct hns_roce_ceqe
*ceqe
;
4623 ceqe
= get_ceqe_v2(eq
, eq
->cons_index
);
4625 ceqe
= mhop_get_ceqe(eq
, eq
->cons_index
);
4627 return (!!(roce_get_bit(ceqe
->comp
, HNS_ROCE_V2_CEQ_CEQE_OWNER_S
))) ^
4628 (!!(eq
->cons_index
& eq
->entries
)) ? ceqe
: NULL
;
4631 static int hns_roce_v2_ceq_int(struct hns_roce_dev
*hr_dev
,
4632 struct hns_roce_eq
*eq
)
4634 struct device
*dev
= hr_dev
->dev
;
4635 struct hns_roce_ceqe
*ceqe
;
4639 while ((ceqe
= next_ceqe_sw_v2(eq
))) {
4641 /* Make sure we read CEQ entry after we have checked the
4646 cqn
= roce_get_field(ceqe
->comp
,
4647 HNS_ROCE_V2_CEQE_COMP_CQN_M
,
4648 HNS_ROCE_V2_CEQE_COMP_CQN_S
);
4650 hns_roce_cq_completion(hr_dev
, cqn
);
4655 if (eq
->cons_index
> (2 * eq
->entries
- 1)) {
4656 dev_warn(dev
, "cons_index overflow, set back to 0.\n");
4661 set_eq_cons_index_v2(eq
);
4666 static irqreturn_t
hns_roce_v2_msix_interrupt_eq(int irq
, void *eq_ptr
)
4668 struct hns_roce_eq
*eq
= eq_ptr
;
4669 struct hns_roce_dev
*hr_dev
= eq
->hr_dev
;
4672 if (eq
->type_flag
== HNS_ROCE_CEQ
)
4673 /* Completion event interrupt */
4674 int_work
= hns_roce_v2_ceq_int(hr_dev
, eq
);
4676 /* Asychronous event interrupt */
4677 int_work
= hns_roce_v2_aeq_int(hr_dev
, eq
);
4679 return IRQ_RETVAL(int_work
);
4682 static irqreturn_t
hns_roce_v2_msix_interrupt_abn(int irq
, void *dev_id
)
4684 struct hns_roce_dev
*hr_dev
= dev_id
;
4685 struct device
*dev
= hr_dev
->dev
;
4690 /* Abnormal interrupt */
4691 int_st
= roce_read(hr_dev
, ROCEE_VF_ABN_INT_ST_REG
);
4692 int_en
= roce_read(hr_dev
, ROCEE_VF_ABN_INT_EN_REG
);
4694 if (roce_get_bit(int_st
, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S
)) {
4695 dev_err(dev
, "AEQ overflow!\n");
4697 roce_set_bit(int_st
, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S
, 1);
4698 roce_write(hr_dev
, ROCEE_VF_ABN_INT_ST_REG
, int_st
);
4700 roce_set_bit(int_en
, HNS_ROCE_V2_VF_ABN_INT_EN_S
, 1);
4701 roce_write(hr_dev
, ROCEE_VF_ABN_INT_EN_REG
, int_en
);
4704 } else if (roce_get_bit(int_st
, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S
)) {
4705 dev_err(dev
, "BUS ERR!\n");
4707 roce_set_bit(int_st
, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S
, 1);
4708 roce_write(hr_dev
, ROCEE_VF_ABN_INT_ST_REG
, int_st
);
4710 roce_set_bit(int_en
, HNS_ROCE_V2_VF_ABN_INT_EN_S
, 1);
4711 roce_write(hr_dev
, ROCEE_VF_ABN_INT_EN_REG
, int_en
);
4714 } else if (roce_get_bit(int_st
, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S
)) {
4715 dev_err(dev
, "OTHER ERR!\n");
4717 roce_set_bit(int_st
, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S
, 1);
4718 roce_write(hr_dev
, ROCEE_VF_ABN_INT_ST_REG
, int_st
);
4720 roce_set_bit(int_en
, HNS_ROCE_V2_VF_ABN_INT_EN_S
, 1);
4721 roce_write(hr_dev
, ROCEE_VF_ABN_INT_EN_REG
, int_en
);
4725 dev_err(dev
, "There is no abnormal irq found!\n");
4727 return IRQ_RETVAL(int_work
);
4730 static void hns_roce_v2_int_mask_enable(struct hns_roce_dev
*hr_dev
,
4731 int eq_num
, int enable_flag
)
4735 if (enable_flag
== EQ_ENABLE
) {
4736 for (i
= 0; i
< eq_num
; i
++)
4737 roce_write(hr_dev
, ROCEE_VF_EVENT_INT_EN_REG
+
4739 HNS_ROCE_V2_VF_EVENT_INT_EN_M
);
4741 roce_write(hr_dev
, ROCEE_VF_ABN_INT_EN_REG
,
4742 HNS_ROCE_V2_VF_ABN_INT_EN_M
);
4743 roce_write(hr_dev
, ROCEE_VF_ABN_INT_CFG_REG
,
4744 HNS_ROCE_V2_VF_ABN_INT_CFG_M
);
4746 for (i
= 0; i
< eq_num
; i
++)
4747 roce_write(hr_dev
, ROCEE_VF_EVENT_INT_EN_REG
+
4749 HNS_ROCE_V2_VF_EVENT_INT_EN_M
& 0x0);
4751 roce_write(hr_dev
, ROCEE_VF_ABN_INT_EN_REG
,
4752 HNS_ROCE_V2_VF_ABN_INT_EN_M
& 0x0);
4753 roce_write(hr_dev
, ROCEE_VF_ABN_INT_CFG_REG
,
4754 HNS_ROCE_V2_VF_ABN_INT_CFG_M
& 0x0);
4758 static void hns_roce_v2_destroy_eqc(struct hns_roce_dev
*hr_dev
, int eqn
)
4760 struct device
*dev
= hr_dev
->dev
;
4763 if (eqn
< hr_dev
->caps
.num_comp_vectors
)
4764 ret
= hns_roce_cmd_mbox(hr_dev
, 0, 0, eqn
& HNS_ROCE_V2_EQN_M
,
4765 0, HNS_ROCE_CMD_DESTROY_CEQC
,
4766 HNS_ROCE_CMD_TIMEOUT_MSECS
);
4768 ret
= hns_roce_cmd_mbox(hr_dev
, 0, 0, eqn
& HNS_ROCE_V2_EQN_M
,
4769 0, HNS_ROCE_CMD_DESTROY_AEQC
,
4770 HNS_ROCE_CMD_TIMEOUT_MSECS
);
4772 dev_err(dev
, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn
);
4775 static void hns_roce_mhop_free_eq(struct hns_roce_dev
*hr_dev
,
4776 struct hns_roce_eq
*eq
)
4778 struct device
*dev
= hr_dev
->dev
;
4788 mhop_num
= hr_dev
->caps
.eqe_hop_num
;
4789 buf_chk_sz
= 1 << (hr_dev
->caps
.eqe_buf_pg_sz
+ PAGE_SHIFT
);
4790 bt_chk_sz
= 1 << (hr_dev
->caps
.eqe_ba_pg_sz
+ PAGE_SHIFT
);
4793 if (mhop_num
== HNS_ROCE_HOP_NUM_0
) {
4794 dma_free_coherent(dev
, (unsigned int)(eq
->entries
*
4795 eq
->eqe_size
), eq
->bt_l0
, eq
->l0_dma
);
4799 /* hop_num = 1 or hop = 2 */
4800 dma_free_coherent(dev
, bt_chk_sz
, eq
->bt_l0
, eq
->l0_dma
);
4801 if (mhop_num
== 1) {
4802 for (i
= 0; i
< eq
->l0_last_num
; i
++) {
4803 if (i
== eq
->l0_last_num
- 1) {
4804 eqe_alloc
= i
* (buf_chk_sz
/ eq
->eqe_size
);
4805 size
= (eq
->entries
- eqe_alloc
) * eq
->eqe_size
;
4806 dma_free_coherent(dev
, size
, eq
->buf
[i
],
4810 dma_free_coherent(dev
, buf_chk_sz
, eq
->buf
[i
],
4813 } else if (mhop_num
== 2) {
4814 for (i
= 0; i
< eq
->l0_last_num
; i
++) {
4815 dma_free_coherent(dev
, bt_chk_sz
, eq
->bt_l1
[i
],
4818 for (j
= 0; j
< bt_chk_sz
/ 8; j
++) {
4819 idx
= i
* (bt_chk_sz
/ 8) + j
;
4820 if ((i
== eq
->l0_last_num
- 1)
4821 && j
== eq
->l1_last_num
- 1) {
4822 eqe_alloc
= (buf_chk_sz
/ eq
->eqe_size
)
4824 size
= (eq
->entries
- eqe_alloc
)
4826 dma_free_coherent(dev
, size
,
4831 dma_free_coherent(dev
, buf_chk_sz
, eq
->buf
[idx
],
4846 static void hns_roce_v2_free_eq(struct hns_roce_dev
*hr_dev
,
4847 struct hns_roce_eq
*eq
)
4851 buf_chk_sz
= 1 << (eq
->eqe_buf_pg_sz
+ PAGE_SHIFT
);
4853 if (hr_dev
->caps
.eqe_hop_num
) {
4854 hns_roce_mhop_free_eq(hr_dev
, eq
);
4859 dma_free_coherent(hr_dev
->dev
, buf_chk_sz
,
4860 eq
->buf_list
->buf
, eq
->buf_list
->map
);
4863 static void hns_roce_config_eqc(struct hns_roce_dev
*hr_dev
,
4864 struct hns_roce_eq
*eq
,
4867 struct hns_roce_eq_context
*eqc
;
4870 memset(eqc
, 0, sizeof(struct hns_roce_eq_context
));
4873 eq
->doorbell
= hr_dev
->reg_base
+ ROCEE_VF_EQ_DB_CFG0_REG
;
4874 eq
->hop_num
= hr_dev
->caps
.eqe_hop_num
;
4876 eq
->over_ignore
= HNS_ROCE_V2_EQ_OVER_IGNORE_0
;
4877 eq
->coalesce
= HNS_ROCE_V2_EQ_COALESCE_0
;
4878 eq
->arm_st
= HNS_ROCE_V2_EQ_ALWAYS_ARMED
;
4879 eq
->eqe_ba_pg_sz
= hr_dev
->caps
.eqe_ba_pg_sz
;
4880 eq
->eqe_buf_pg_sz
= hr_dev
->caps
.eqe_buf_pg_sz
;
4881 eq
->shift
= ilog2((unsigned int)eq
->entries
);
4884 eq
->eqe_ba
= eq
->buf_list
->map
;
4886 eq
->eqe_ba
= eq
->l0_dma
;
4889 roce_set_field(eqc
->byte_4
,
4890 HNS_ROCE_EQC_EQ_ST_M
,
4891 HNS_ROCE_EQC_EQ_ST_S
,
4892 HNS_ROCE_V2_EQ_STATE_VALID
);
4894 /* set eqe hop num */
4895 roce_set_field(eqc
->byte_4
,
4896 HNS_ROCE_EQC_HOP_NUM_M
,
4897 HNS_ROCE_EQC_HOP_NUM_S
, eq
->hop_num
);
4899 /* set eqc over_ignore */
4900 roce_set_field(eqc
->byte_4
,
4901 HNS_ROCE_EQC_OVER_IGNORE_M
,
4902 HNS_ROCE_EQC_OVER_IGNORE_S
, eq
->over_ignore
);
4904 /* set eqc coalesce */
4905 roce_set_field(eqc
->byte_4
,
4906 HNS_ROCE_EQC_COALESCE_M
,
4907 HNS_ROCE_EQC_COALESCE_S
, eq
->coalesce
);
4909 /* set eqc arm_state */
4910 roce_set_field(eqc
->byte_4
,
4911 HNS_ROCE_EQC_ARM_ST_M
,
4912 HNS_ROCE_EQC_ARM_ST_S
, eq
->arm_st
);
4915 roce_set_field(eqc
->byte_4
,
4917 HNS_ROCE_EQC_EQN_S
, eq
->eqn
);
4920 roce_set_field(eqc
->byte_4
,
4921 HNS_ROCE_EQC_EQE_CNT_M
,
4922 HNS_ROCE_EQC_EQE_CNT_S
,
4923 HNS_ROCE_EQ_INIT_EQE_CNT
);
4925 /* set eqe_ba_pg_sz */
4926 roce_set_field(eqc
->byte_8
,
4927 HNS_ROCE_EQC_BA_PG_SZ_M
,
4928 HNS_ROCE_EQC_BA_PG_SZ_S
,
4929 eq
->eqe_ba_pg_sz
+ PG_SHIFT_OFFSET
);
4931 /* set eqe_buf_pg_sz */
4932 roce_set_field(eqc
->byte_8
,
4933 HNS_ROCE_EQC_BUF_PG_SZ_M
,
4934 HNS_ROCE_EQC_BUF_PG_SZ_S
,
4935 eq
->eqe_buf_pg_sz
+ PG_SHIFT_OFFSET
);
4937 /* set eq_producer_idx */
4938 roce_set_field(eqc
->byte_8
,
4939 HNS_ROCE_EQC_PROD_INDX_M
,
4940 HNS_ROCE_EQC_PROD_INDX_S
,
4941 HNS_ROCE_EQ_INIT_PROD_IDX
);
4943 /* set eq_max_cnt */
4944 roce_set_field(eqc
->byte_12
,
4945 HNS_ROCE_EQC_MAX_CNT_M
,
4946 HNS_ROCE_EQC_MAX_CNT_S
, eq
->eq_max_cnt
);
4949 roce_set_field(eqc
->byte_12
,
4950 HNS_ROCE_EQC_PERIOD_M
,
4951 HNS_ROCE_EQC_PERIOD_S
, eq
->eq_period
);
4953 /* set eqe_report_timer */
4954 roce_set_field(eqc
->eqe_report_timer
,
4955 HNS_ROCE_EQC_REPORT_TIMER_M
,
4956 HNS_ROCE_EQC_REPORT_TIMER_S
,
4957 HNS_ROCE_EQ_INIT_REPORT_TIMER
);
4959 /* set eqe_ba [34:3] */
4960 roce_set_field(eqc
->eqe_ba0
,
4961 HNS_ROCE_EQC_EQE_BA_L_M
,
4962 HNS_ROCE_EQC_EQE_BA_L_S
, eq
->eqe_ba
>> 3);
4964 /* set eqe_ba [64:35] */
4965 roce_set_field(eqc
->eqe_ba1
,
4966 HNS_ROCE_EQC_EQE_BA_H_M
,
4967 HNS_ROCE_EQC_EQE_BA_H_S
, eq
->eqe_ba
>> 35);
4970 roce_set_field(eqc
->byte_28
,
4971 HNS_ROCE_EQC_SHIFT_M
,
4972 HNS_ROCE_EQC_SHIFT_S
, eq
->shift
);
4974 /* set eq MSI_IDX */
4975 roce_set_field(eqc
->byte_28
,
4976 HNS_ROCE_EQC_MSI_INDX_M
,
4977 HNS_ROCE_EQC_MSI_INDX_S
,
4978 HNS_ROCE_EQ_INIT_MSI_IDX
);
4980 /* set cur_eqe_ba [27:12] */
4981 roce_set_field(eqc
->byte_28
,
4982 HNS_ROCE_EQC_CUR_EQE_BA_L_M
,
4983 HNS_ROCE_EQC_CUR_EQE_BA_L_S
, eq
->cur_eqe_ba
>> 12);
4985 /* set cur_eqe_ba [59:28] */
4986 roce_set_field(eqc
->byte_32
,
4987 HNS_ROCE_EQC_CUR_EQE_BA_M_M
,
4988 HNS_ROCE_EQC_CUR_EQE_BA_M_S
, eq
->cur_eqe_ba
>> 28);
4990 /* set cur_eqe_ba [63:60] */
4991 roce_set_field(eqc
->byte_36
,
4992 HNS_ROCE_EQC_CUR_EQE_BA_H_M
,
4993 HNS_ROCE_EQC_CUR_EQE_BA_H_S
, eq
->cur_eqe_ba
>> 60);
4995 /* set eq consumer idx */
4996 roce_set_field(eqc
->byte_36
,
4997 HNS_ROCE_EQC_CONS_INDX_M
,
4998 HNS_ROCE_EQC_CONS_INDX_S
,
4999 HNS_ROCE_EQ_INIT_CONS_IDX
);
5001 /* set nex_eqe_ba[43:12] */
5002 roce_set_field(eqc
->nxt_eqe_ba0
,
5003 HNS_ROCE_EQC_NXT_EQE_BA_L_M
,
5004 HNS_ROCE_EQC_NXT_EQE_BA_L_S
, eq
->nxt_eqe_ba
>> 12);
5006 /* set nex_eqe_ba[63:44] */
5007 roce_set_field(eqc
->nxt_eqe_ba1
,
5008 HNS_ROCE_EQC_NXT_EQE_BA_H_M
,
5009 HNS_ROCE_EQC_NXT_EQE_BA_H_S
, eq
->nxt_eqe_ba
>> 44);
5012 static int hns_roce_mhop_alloc_eq(struct hns_roce_dev
*hr_dev
,
5013 struct hns_roce_eq
*eq
)
5015 struct device
*dev
= hr_dev
->dev
;
5016 int eq_alloc_done
= 0;
5031 mhop_num
= hr_dev
->caps
.eqe_hop_num
;
5032 buf_chk_sz
= 1 << (hr_dev
->caps
.eqe_buf_pg_sz
+ PAGE_SHIFT
);
5033 bt_chk_sz
= 1 << (hr_dev
->caps
.eqe_ba_pg_sz
+ PAGE_SHIFT
);
5035 ba_num
= (PAGE_ALIGN(eq
->entries
* eq
->eqe_size
) + buf_chk_sz
- 1)
5037 bt_num
= (ba_num
+ bt_chk_sz
/ 8 - 1) / (bt_chk_sz
/ 8);
5040 if (mhop_num
== HNS_ROCE_HOP_NUM_0
) {
5041 if (eq
->entries
> buf_chk_sz
/ eq
->eqe_size
) {
5042 dev_err(dev
, "eq entries %d is larger than buf_pg_sz!",
5046 eq
->bt_l0
= dma_alloc_coherent(dev
, eq
->entries
* eq
->eqe_size
,
5047 &(eq
->l0_dma
), GFP_KERNEL
);
5051 eq
->cur_eqe_ba
= eq
->l0_dma
;
5054 memset(eq
->bt_l0
, 0, eq
->entries
* eq
->eqe_size
);
5059 eq
->buf_dma
= kcalloc(ba_num
, sizeof(*eq
->buf_dma
), GFP_KERNEL
);
5062 eq
->buf
= kcalloc(ba_num
, sizeof(*eq
->buf
), GFP_KERNEL
);
5064 goto err_kcalloc_buf
;
5066 if (mhop_num
== 2) {
5067 eq
->l1_dma
= kcalloc(bt_num
, sizeof(*eq
->l1_dma
), GFP_KERNEL
);
5069 goto err_kcalloc_l1_dma
;
5071 eq
->bt_l1
= kcalloc(bt_num
, sizeof(*eq
->bt_l1
), GFP_KERNEL
);
5073 goto err_kcalloc_bt_l1
;
5077 eq
->bt_l0
= dma_alloc_coherent(dev
, bt_chk_sz
, &eq
->l0_dma
, GFP_KERNEL
);
5079 goto err_dma_alloc_l0
;
5081 if (mhop_num
== 1) {
5082 if (ba_num
> (bt_chk_sz
/ 8))
5083 dev_err(dev
, "ba_num %d is too large for 1 hop\n",
5087 for (i
= 0; i
< bt_chk_sz
/ 8; i
++) {
5088 if (eq_buf_cnt
+ 1 < ba_num
) {
5091 eqe_alloc
= i
* (buf_chk_sz
/ eq
->eqe_size
);
5092 size
= (eq
->entries
- eqe_alloc
) * eq
->eqe_size
;
5094 eq
->buf
[i
] = dma_alloc_coherent(dev
, size
,
5098 goto err_dma_alloc_buf
;
5100 *(eq
->bt_l0
+ i
) = eq
->buf_dma
[i
];
5103 if (eq_buf_cnt
>= ba_num
)
5106 eq
->cur_eqe_ba
= eq
->buf_dma
[0];
5107 eq
->nxt_eqe_ba
= eq
->buf_dma
[1];
5109 } else if (mhop_num
== 2) {
5110 /* alloc L1 BT and buf */
5111 for (i
= 0; i
< bt_chk_sz
/ 8; i
++) {
5112 eq
->bt_l1
[i
] = dma_alloc_coherent(dev
, bt_chk_sz
,
5116 goto err_dma_alloc_l1
;
5117 *(eq
->bt_l0
+ i
) = eq
->l1_dma
[i
];
5119 for (j
= 0; j
< bt_chk_sz
/ 8; j
++) {
5120 idx
= i
* bt_chk_sz
/ 8 + j
;
5121 if (eq_buf_cnt
+ 1 < ba_num
) {
5124 eqe_alloc
= (buf_chk_sz
/ eq
->eqe_size
)
5126 size
= (eq
->entries
- eqe_alloc
)
5129 eq
->buf
[idx
] = dma_alloc_coherent(dev
, size
,
5130 &(eq
->buf_dma
[idx
]),
5133 goto err_dma_alloc_buf
;
5135 *(eq
->bt_l1
[i
] + j
) = eq
->buf_dma
[idx
];
5138 if (eq_buf_cnt
>= ba_num
) {
5147 eq
->cur_eqe_ba
= eq
->buf_dma
[0];
5148 eq
->nxt_eqe_ba
= eq
->buf_dma
[1];
5151 eq
->l0_last_num
= i
+ 1;
5153 eq
->l1_last_num
= j
+ 1;
5158 dma_free_coherent(dev
, bt_chk_sz
, eq
->bt_l0
, eq
->l0_dma
);
5161 for (i
-= 1; i
>= 0; i
--) {
5162 dma_free_coherent(dev
, bt_chk_sz
, eq
->bt_l1
[i
],
5165 for (j
= 0; j
< bt_chk_sz
/ 8; j
++) {
5166 idx
= i
* bt_chk_sz
/ 8 + j
;
5167 dma_free_coherent(dev
, buf_chk_sz
, eq
->buf
[idx
],
5171 goto err_dma_alloc_l0
;
5174 dma_free_coherent(dev
, bt_chk_sz
, eq
->bt_l0
, eq
->l0_dma
);
5179 for (i
-= 1; i
>= 0; i
--)
5180 dma_free_coherent(dev
, buf_chk_sz
, eq
->buf
[i
],
5182 else if (mhop_num
== 2) {
5185 for (; i
>= 0; i
--) {
5186 dma_free_coherent(dev
, bt_chk_sz
, eq
->bt_l1
[i
],
5189 for (j
= 0; j
< bt_chk_sz
/ 8; j
++) {
5190 if (i
== record_i
&& j
>= record_j
)
5193 idx
= i
* bt_chk_sz
/ 8 + j
;
5194 dma_free_coherent(dev
, buf_chk_sz
,
5220 static int hns_roce_v2_create_eq(struct hns_roce_dev
*hr_dev
,
5221 struct hns_roce_eq
*eq
,
5222 unsigned int eq_cmd
)
5224 struct device
*dev
= hr_dev
->dev
;
5225 struct hns_roce_cmd_mailbox
*mailbox
;
5229 /* Allocate mailbox memory */
5230 mailbox
= hns_roce_alloc_cmd_mailbox(hr_dev
);
5231 if (IS_ERR(mailbox
))
5232 return PTR_ERR(mailbox
);
5234 if (!hr_dev
->caps
.eqe_hop_num
) {
5235 buf_chk_sz
= 1 << (hr_dev
->caps
.eqe_buf_pg_sz
+ PAGE_SHIFT
);
5237 eq
->buf_list
= kzalloc(sizeof(struct hns_roce_buf_list
),
5239 if (!eq
->buf_list
) {
5244 eq
->buf_list
->buf
= dma_alloc_coherent(dev
, buf_chk_sz
,
5245 &(eq
->buf_list
->map
),
5247 if (!eq
->buf_list
->buf
) {
5253 ret
= hns_roce_mhop_alloc_eq(hr_dev
, eq
);
5260 hns_roce_config_eqc(hr_dev
, eq
, mailbox
->buf
);
5262 ret
= hns_roce_cmd_mbox(hr_dev
, mailbox
->dma
, 0, eq
->eqn
, 0,
5263 eq_cmd
, HNS_ROCE_CMD_TIMEOUT_MSECS
);
5265 dev_err(dev
, "[mailbox cmd] create eqc failed.\n");
5269 hns_roce_free_cmd_mailbox(hr_dev
, mailbox
);
5274 if (!hr_dev
->caps
.eqe_hop_num
)
5275 dma_free_coherent(dev
, buf_chk_sz
, eq
->buf_list
->buf
,
5278 hns_roce_mhop_free_eq(hr_dev
, eq
);
5283 kfree(eq
->buf_list
);
5286 hns_roce_free_cmd_mailbox(hr_dev
, mailbox
);
5291 static int hns_roce_v2_init_eq_table(struct hns_roce_dev
*hr_dev
)
5293 struct hns_roce_eq_table
*eq_table
= &hr_dev
->eq_table
;
5294 struct device
*dev
= hr_dev
->dev
;
5295 struct hns_roce_eq
*eq
;
5296 unsigned int eq_cmd
;
5305 other_num
= hr_dev
->caps
.num_other_vectors
;
5306 comp_num
= hr_dev
->caps
.num_comp_vectors
;
5307 aeq_num
= hr_dev
->caps
.num_aeq_vectors
;
5309 eq_num
= comp_num
+ aeq_num
;
5310 irq_num
= eq_num
+ other_num
;
5312 eq_table
->eq
= kcalloc(eq_num
, sizeof(*eq_table
->eq
), GFP_KERNEL
);
5316 for (i
= 0; i
< irq_num
; i
++) {
5317 hr_dev
->irq_names
[i
] = kzalloc(HNS_ROCE_INT_NAME_LEN
,
5319 if (!hr_dev
->irq_names
[i
]) {
5321 goto err_failed_kzalloc
;
5326 for (j
= 0; j
< eq_num
; j
++) {
5327 eq
= &eq_table
->eq
[j
];
5328 eq
->hr_dev
= hr_dev
;
5332 eq_cmd
= HNS_ROCE_CMD_CREATE_CEQC
;
5333 eq
->type_flag
= HNS_ROCE_CEQ
;
5334 eq
->entries
= hr_dev
->caps
.ceqe_depth
;
5335 eq
->eqe_size
= HNS_ROCE_CEQ_ENTRY_SIZE
;
5336 eq
->irq
= hr_dev
->irq
[j
+ other_num
+ aeq_num
];
5337 eq
->eq_max_cnt
= HNS_ROCE_CEQ_DEFAULT_BURST_NUM
;
5338 eq
->eq_period
= HNS_ROCE_CEQ_DEFAULT_INTERVAL
;
5341 eq_cmd
= HNS_ROCE_CMD_CREATE_AEQC
;
5342 eq
->type_flag
= HNS_ROCE_AEQ
;
5343 eq
->entries
= hr_dev
->caps
.aeqe_depth
;
5344 eq
->eqe_size
= HNS_ROCE_AEQ_ENTRY_SIZE
;
5345 eq
->irq
= hr_dev
->irq
[j
- comp_num
+ other_num
];
5346 eq
->eq_max_cnt
= HNS_ROCE_AEQ_DEFAULT_BURST_NUM
;
5347 eq
->eq_period
= HNS_ROCE_AEQ_DEFAULT_INTERVAL
;
5350 ret
= hns_roce_v2_create_eq(hr_dev
, eq
, eq_cmd
);
5352 dev_err(dev
, "eq create failed.\n");
5353 goto err_create_eq_fail
;
5358 hns_roce_v2_int_mask_enable(hr_dev
, eq_num
, EQ_ENABLE
);
5360 /* irq contains: abnormal + AEQ + CEQ*/
5361 for (k
= 0; k
< irq_num
; k
++)
5363 snprintf((char *)hr_dev
->irq_names
[k
],
5364 HNS_ROCE_INT_NAME_LEN
, "hns-abn-%d", k
);
5365 else if (k
< (other_num
+ aeq_num
))
5366 snprintf((char *)hr_dev
->irq_names
[k
],
5367 HNS_ROCE_INT_NAME_LEN
, "hns-aeq-%d",
5370 snprintf((char *)hr_dev
->irq_names
[k
],
5371 HNS_ROCE_INT_NAME_LEN
, "hns-ceq-%d",
5372 k
- other_num
- aeq_num
);
5374 for (k
= 0; k
< irq_num
; k
++) {
5376 ret
= request_irq(hr_dev
->irq
[k
],
5377 hns_roce_v2_msix_interrupt_abn
,
5378 0, hr_dev
->irq_names
[k
], hr_dev
);
5380 else if (k
< (other_num
+ comp_num
))
5381 ret
= request_irq(eq_table
->eq
[k
- other_num
].irq
,
5382 hns_roce_v2_msix_interrupt_eq
,
5383 0, hr_dev
->irq_names
[k
+ aeq_num
],
5384 &eq_table
->eq
[k
- other_num
]);
5386 ret
= request_irq(eq_table
->eq
[k
- other_num
].irq
,
5387 hns_roce_v2_msix_interrupt_eq
,
5388 0, hr_dev
->irq_names
[k
- comp_num
],
5389 &eq_table
->eq
[k
- other_num
]);
5391 dev_err(dev
, "Request irq error!\n");
5392 goto err_request_irq_fail
;
5397 create_singlethread_workqueue("hns_roce_irq_workqueue");
5398 if (!hr_dev
->irq_workq
) {
5399 dev_err(dev
, "Create irq workqueue failed!\n");
5401 goto err_request_irq_fail
;
5406 err_request_irq_fail
:
5407 for (k
-= 1; k
>= 0; k
--)
5409 free_irq(hr_dev
->irq
[k
], hr_dev
);
5411 free_irq(eq_table
->eq
[k
- other_num
].irq
,
5412 &eq_table
->eq
[k
- other_num
]);
5415 for (j
-= 1; j
>= 0; j
--)
5416 hns_roce_v2_free_eq(hr_dev
, &eq_table
->eq
[j
]);
5419 for (i
-= 1; i
>= 0; i
--)
5420 kfree(hr_dev
->irq_names
[i
]);
5421 kfree(eq_table
->eq
);
5426 static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev
*hr_dev
)
5428 struct hns_roce_eq_table
*eq_table
= &hr_dev
->eq_table
;
5433 eq_num
= hr_dev
->caps
.num_comp_vectors
+ hr_dev
->caps
.num_aeq_vectors
;
5434 irq_num
= eq_num
+ hr_dev
->caps
.num_other_vectors
;
5437 hns_roce_v2_int_mask_enable(hr_dev
, eq_num
, EQ_DISABLE
);
5439 for (i
= 0; i
< hr_dev
->caps
.num_other_vectors
; i
++)
5440 free_irq(hr_dev
->irq
[i
], hr_dev
);
5442 for (i
= 0; i
< eq_num
; i
++) {
5443 hns_roce_v2_destroy_eqc(hr_dev
, i
);
5445 free_irq(eq_table
->eq
[i
].irq
, &eq_table
->eq
[i
]);
5447 hns_roce_v2_free_eq(hr_dev
, &eq_table
->eq
[i
]);
5450 for (i
= 0; i
< irq_num
; i
++)
5451 kfree(hr_dev
->irq_names
[i
]);
5453 kfree(eq_table
->eq
);
5455 flush_workqueue(hr_dev
->irq_workq
);
5456 destroy_workqueue(hr_dev
->irq_workq
);
/*
 * Fill in the SRQ context (SRQC) that will be handed to hardware via a
 * mailbox buffer.  All multi-bit fields are packed with roce_set_field();
 * base addresses are split into a low part (>> 3) stored in a dedicated
 * word and a high part (>> 35) packed into a bit-field.
 *
 * @pdn:             protection domain number
 * @xrcd:            XRC domain
 * @cqn:             completion queue bound to this SRQ
 * @mtts_wqe/idx:    first MTT page addresses of the WQE and index buffers
 * @dma_handle_wqe/idx: base-address-table DMA addresses
 *
 * NOTE(review): the cpu_to_le32() wrappers on values passed INTO
 * roce_set_field() look like a double conversion (roce_set_field
 * already operates on the le32 field) — harmless on little-endian,
 * but verify on big-endian hosts.
 */
static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
				   struct hns_roce_srq *srq, u32 pdn, u16 xrcd,
				   u32 cqn, void *mb_buf, u64 *mtts_wqe,
				   u64 *mtts_idx, dma_addr_t dma_handle_wqe,
				   dma_addr_t dma_handle_idx)
{
	struct hns_roce_srq_context *srq_context;

	srq_context = mb_buf;
	memset(srq_context, 0, sizeof(*srq_context));

	/* SRQ state: 1 = valid/ready */
	roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQ_ST_M,
		       SRQC_BYTE_4_SRQ_ST_S, 1);

	/* Hop number 0 is encoded as 0 on the wire */
	roce_set_field(srq_context->byte_4_srqn_srqst,
		       SRQC_BYTE_4_SRQ_WQE_HOP_NUM_M,
		       SRQC_BYTE_4_SRQ_WQE_HOP_NUM_S,
		       (hr_dev->caps.srqwqe_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
			hr_dev->caps.srqwqe_hop_num));
	/* log2 of the SRQ depth */
	roce_set_field(srq_context->byte_4_srqn_srqst,
		       SRQC_BYTE_4_SRQ_SHIFT_M, SRQC_BYTE_4_SRQ_SHIFT_S,
		       ilog2(srq->max));

	roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQN_M,
		       SRQC_BYTE_4_SRQN_S, srq->srqn);

	/* Limit watermark starts at 0 (disabled); set via modify_srq */
	roce_set_field(srq_context->byte_8_limit_wl, SRQC_BYTE_8_SRQ_LIMIT_WL_M,
		       SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);

	roce_set_field(srq_context->byte_12_xrcd, SRQC_BYTE_12_SRQ_XRCD_M,
		       SRQC_BYTE_12_SRQ_XRCD_S, xrcd);

	/* WQE base-address table: low 32 bits (address >> 3) ... */
	srq_context->wqe_bt_ba = cpu_to_le32((u32)(dma_handle_wqe >> 3));

	/* ... and the remaining high bits (address >> 35) */
	roce_set_field(srq_context->byte_24_wqe_bt_ba,
		       SRQC_BYTE_24_SRQ_WQE_BT_BA_M,
		       SRQC_BYTE_24_SRQ_WQE_BT_BA_S,
		       cpu_to_le32(dma_handle_wqe >> 35));

	roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_PD_M,
		       SRQC_BYTE_28_PD_S, pdn);
	/* RQWS = ceil(log2(max_gs)); 0 when no SGEs are supported */
	roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_RQWS_M,
		       SRQC_BYTE_28_RQWS_S, srq->max_gs <= 0 ? 0 :
		       fls(srq->max_gs - 1));

	/* Index-queue base-address table, split like the WQE BT above */
	srq_context->idx_bt_ba = (u32)(dma_handle_idx >> 3);
	srq_context->idx_bt_ba = cpu_to_le32(srq_context->idx_bt_ba);
	roce_set_field(srq_context->rsv_idx_bt_ba,
		       SRQC_BYTE_36_SRQ_IDX_BT_BA_M,
		       SRQC_BYTE_36_SRQ_IDX_BT_BA_S,
		       cpu_to_le32(dma_handle_idx >> 35));

	/* Current index-buffer block: mtts_idx[0], split low/high */
	srq_context->idx_cur_blk_addr = (u32)(mtts_idx[0] >> PAGE_ADDR_SHIFT);
	srq_context->idx_cur_blk_addr =
			cpu_to_le32(srq_context->idx_cur_blk_addr);
	roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
		       SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_M,
		       SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_S,
		       cpu_to_le32((mtts_idx[0]) >> (32 + PAGE_ADDR_SHIFT)));
	roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
		       SRQC_BYTE_44_SRQ_IDX_HOP_NUM_M,
		       SRQC_BYTE_44_SRQ_IDX_HOP_NUM_S,
		       hr_dev->caps.idx_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
		       hr_dev->caps.idx_hop_num);

	roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
		       SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_M,
		       SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_S,
		       hr_dev->caps.idx_ba_pg_sz);
	roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
		       SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_M,
		       SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_S,
		       hr_dev->caps.idx_buf_pg_sz);

	/* Next index-buffer block: mtts_idx[1], split low/high */
	srq_context->idx_nxt_blk_addr = (u32)(mtts_idx[1] >> PAGE_ADDR_SHIFT);
	srq_context->idx_nxt_blk_addr =
			cpu_to_le32(srq_context->idx_nxt_blk_addr);
	roce_set_field(srq_context->rsv_idxnxtblkaddr,
		       SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_M,
		       SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_S,
		       cpu_to_le32((mtts_idx[1]) >> (32 + PAGE_ADDR_SHIFT)));
	roce_set_field(srq_context->byte_56_xrc_cqn,
		       SRQC_BYTE_56_SRQ_XRC_CQN_M, SRQC_BYTE_56_SRQ_XRC_CQN_S,
		       cqn);
	/* Page sizes are biased by PG_SHIFT_OFFSET on the wire */
	roce_set_field(srq_context->byte_56_xrc_cqn,
		       SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_M,
		       SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_S,
		       hr_dev->caps.srqwqe_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(srq_context->byte_56_xrc_cqn,
		       SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_M,
		       SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_S,
		       hr_dev->caps.srqwqe_buf_pg_sz + PG_SHIFT_OFFSET);

	/* Doorbell record disabled for this SRQ */
	roce_set_bit(srq_context->db_record_addr_record_en,
		     SRQC_BYTE_60_SRQ_RECORD_EN_S, 0);
}
/*
 * ib_device modify_srq hook: currently only IB_SRQ_LIMIT is supported.
 * Hardware is updated through a MODIFY_SRQC mailbox whose buffer holds
 * two contexts back to back: the new values, then a mask (0xff = keep,
 * 0 = take from the new-values half).
 *
 * Returns 0 on success, -EINVAL for an out-of-range limit, or the
 * mailbox error code.
 */
static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
				  struct ib_srq_attr *srq_attr,
				  enum ib_srq_attr_mask srq_attr_mask,
				  struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
	struct hns_roce_srq *srq = to_hr_srq(ibsrq);
	struct hns_roce_srq_context *srq_context;
	struct hns_roce_srq_context *srqc_mask;
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	if (srq_attr_mask & IB_SRQ_LIMIT) {
		/* The limit watermark must be below the SRQ depth */
		if (srq_attr->srq_limit >= srq->max)
			return -EINVAL;

		mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);

		srq_context = mailbox->buf;
		/* Second context in the buffer is the update mask */
		srqc_mask = (struct hns_roce_srq_context *)mailbox->buf + 1;

		/* All-ones mask = leave every field untouched by default */
		memset(srqc_mask, 0xff, sizeof(*srqc_mask));

		roce_set_field(srq_context->byte_8_limit_wl,
			       SRQC_BYTE_8_SRQ_LIMIT_WL_M,
			       SRQC_BYTE_8_SRQ_LIMIT_WL_S,
			       srq_attr->srq_limit);
		/* Clear the mask bits so hardware applies the new limit */
		roce_set_field(srqc_mask->byte_8_limit_wl,
			       SRQC_BYTE_8_SRQ_LIMIT_WL_M,
			       SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);

		ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, srq->srqn, 0,
					HNS_ROCE_CMD_MODIFY_SRQC,
					HNS_ROCE_CMD_TIMEOUT_MSECS);
		hns_roce_free_cmd_mailbox(hr_dev, mailbox);
		if (ret) {
			dev_err(hr_dev->dev,
				"MODIFY SRQ Failed to cmd mailbox.\n");
			return ret;
		}
	}

	return 0;
}
5602 int hns_roce_v2_query_srq(struct ib_srq
*ibsrq
, struct ib_srq_attr
*attr
)
5604 struct hns_roce_dev
*hr_dev
= to_hr_dev(ibsrq
->device
);
5605 struct hns_roce_srq
*srq
= to_hr_srq(ibsrq
);
5606 struct hns_roce_srq_context
*srq_context
;
5607 struct hns_roce_cmd_mailbox
*mailbox
;
5611 mailbox
= hns_roce_alloc_cmd_mailbox(hr_dev
);
5612 if (IS_ERR(mailbox
))
5613 return PTR_ERR(mailbox
);
5615 srq_context
= mailbox
->buf
;
5616 ret
= hns_roce_cmd_mbox(hr_dev
, 0, mailbox
->dma
, srq
->srqn
, 0,
5617 HNS_ROCE_CMD_QUERY_SRQC
,
5618 HNS_ROCE_CMD_TIMEOUT_MSECS
);
5620 dev_err(hr_dev
->dev
, "QUERY SRQ cmd process error\n");
5624 limit_wl
= roce_get_field(srq_context
->byte_8_limit_wl
,
5625 SRQC_BYTE_8_SRQ_LIMIT_WL_M
,
5626 SRQC_BYTE_8_SRQ_LIMIT_WL_S
);
5628 attr
->srq_limit
= limit_wl
;
5629 attr
->max_wr
= srq
->max
- 1;
5630 attr
->max_sge
= srq
->max_gs
;
5632 memcpy(srq_context
, mailbox
->buf
, sizeof(*srq_context
));
5635 hns_roce_free_cmd_mailbox(hr_dev
, mailbox
);
5639 static int find_empty_entry(struct hns_roce_idx_que
*idx_que
)
5644 /* bitmap[i] is set zero if all bits are allocated */
5645 for (i
= 0; idx_que
->bitmap
[i
] == 0; ++i
)
5647 bit_num
= ffs(idx_que
->bitmap
[i
]);
5648 idx_que
->bitmap
[i
] &= ~(1ULL << (bit_num
- 1));
5650 return i
* sizeof(u64
) * 8 + (bit_num
- 1);
/*
 * Record which WQE slot backs the index-queue entry at cur_idx, so the
 * hardware can map ring position -> WQE buffer when consuming receives.
 */
static void fill_idx_queue(struct hns_roce_idx_que *idx_que,
			   int cur_idx, int wqe_idx)
{
	unsigned int *addr;

	/* Each index-queue entry is entry_sz bytes inside idx_buf */
	addr = (unsigned int *)hns_roce_buf_offset(&idx_que->idx_buf,
						   cur_idx * idx_que->entry_sz);
	*addr = wqe_idx;
}
/*
 * ib_device post_srq_recv hook: link each work request into a free SRQ
 * WQE, then ring the SRQ doorbell once for the whole chain.  On any
 * per-WR failure, *bad_wr points at the offending request and posting
 * stops; WRs already accepted remain posted.
 *
 * Runs under srq->lock with IRQs saved, as it can race the completion
 * path.
 */
static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
				     const struct ib_recv_wr *wr,
				     const struct ib_recv_wr **bad_wr)
{
	struct hns_roce_srq *srq = to_hr_srq(ibsrq);
	struct hns_roce_v2_wqe_data_seg *dseg;
	struct hns_roce_v2_db srq_db;
	unsigned long flags;
	int ret = 0;
	int wqe_idx;
	void *wqe;
	int nreq;
	int ind;
	int i;

	spin_lock_irqsave(&srq->lock, flags);

	/* Ring position; srq->max is a power of two */
	ind = srq->head & (srq->max - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (unlikely(wr->num_sge > srq->max_gs)) {
			ret = -EINVAL;
			*bad_wr = wr;
			break;
		}

		/*
		 * Queue-full test.  NOTE(review): head == tail reads as
		 * "empty" in most ring conventions — confirm this
		 * driver's head/tail semantics for the SRQ ring.
		 */
		if (unlikely(srq->head == srq->tail)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		wqe_idx = find_empty_entry(&srq->idx_que);
		fill_idx_queue(&srq->idx_que, ind, wqe_idx);
		wqe = get_srq_wqe(srq, wqe_idx);
		dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;

		/* Copy the caller's scatter list into the WQE (LE fields) */
		for (i = 0; i < wr->num_sge; ++i) {
			dseg[i].len = cpu_to_le32(wr->sg_list[i].length);
			dseg[i].lkey = cpu_to_le32(wr->sg_list[i].lkey);
			dseg[i].addr = cpu_to_le64(wr->sg_list[i].addr);
		}

		/*
		 * Terminate a short scatter list with the invalid-lkey
		 * sentinel 0x100.  NOTE(review): writing through dseg->
		 * (slot 0) rather than dseg[i] looks wrong — it would
		 * clobber the first SGE; verify against hardware spec.
		 */
		if (i < srq->max_gs) {
			dseg->len = 0;
			dseg->lkey = cpu_to_le32(0x100);
			dseg->addr = 0;
		}

		/* Remember the caller's cookie for completion reporting */
		srq->wrid[wqe_idx] = wr->wr_id;
		ind = (ind + 1) & (srq->max - 1);
	}

	if (likely(nreq)) {
		srq->head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		srq_db.byte_4 = HNS_ROCE_V2_SRQ_DB << 24 | srq->srqn;
		srq_db.parameter = srq->head;

		hns_roce_write64_k((__le32 *)&srq_db, srq->db_reg_l);
	}

	spin_unlock_irqrestore(&srq->lock, flags);

	return ret;
}
/* ib_device_ops installed on every hip08 device (verbs fast path). */
static const struct ib_device_ops hns_roce_v2_dev_ops = {
	.destroy_qp = hns_roce_v2_destroy_qp,
	.modify_cq = hns_roce_v2_modify_cq,
	.poll_cq = hns_roce_v2_poll_cq,
	.post_recv = hns_roce_v2_post_recv,
	.post_send = hns_roce_v2_post_send,
	.query_qp = hns_roce_v2_query_qp,
	.req_notify_cq = hns_roce_v2_req_notify_cq,
};
/* Extra ib_device_ops registered only when the device supports SRQ. */
static const struct ib_device_ops hns_roce_v2_dev_srq_ops = {
	.modify_srq = hns_roce_v2_modify_srq,
	.post_srq_recv = hns_roce_v2_post_srq_recv,
	.query_srq = hns_roce_v2_query_srq,
};
/*
 * hip08 hardware-abstraction vtable consumed by the common hns_roce core
 * (hr_dev->hw).  Also carries the ib_device_ops tables the core merges
 * into the ib_device at registration time.
 */
static const struct hns_roce_hw hns_roce_hw_v2 = {
	.cmq_init = hns_roce_v2_cmq_init,
	.cmq_exit = hns_roce_v2_cmq_exit,
	.hw_profile = hns_roce_v2_profile,
	.hw_init = hns_roce_v2_init,
	.hw_exit = hns_roce_v2_exit,
	.post_mbox = hns_roce_v2_post_mbox,
	.chk_mbox = hns_roce_v2_chk_mbox,
	.set_gid = hns_roce_v2_set_gid,
	.set_mac = hns_roce_v2_set_mac,
	.write_mtpt = hns_roce_v2_write_mtpt,
	.rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt,
	.frmr_write_mtpt = hns_roce_v2_frmr_write_mtpt,
	.mw_write_mtpt = hns_roce_v2_mw_write_mtpt,
	.write_cqc = hns_roce_v2_write_cqc,
	.set_hem = hns_roce_v2_set_hem,
	.clear_hem = hns_roce_v2_clear_hem,
	.modify_qp = hns_roce_v2_modify_qp,
	.query_qp = hns_roce_v2_query_qp,
	.destroy_qp = hns_roce_v2_destroy_qp,
	.modify_cq = hns_roce_v2_modify_cq,
	.post_send = hns_roce_v2_post_send,
	.post_recv = hns_roce_v2_post_recv,
	.req_notify_cq = hns_roce_v2_req_notify_cq,
	.poll_cq = hns_roce_v2_poll_cq,
	.init_eq = hns_roce_v2_init_eq_table,
	.cleanup_eq = hns_roce_v2_cleanup_eq_table,
	.write_srqc = hns_roce_v2_write_srqc,
	.modify_srq = hns_roce_v2_modify_srq,
	.query_srq = hns_roce_v2_query_srq,
	.post_srq_recv = hns_roce_v2_post_srq_recv,
	.hns_roce_dev_ops = &hns_roce_v2_dev_ops,
	.hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
};
/* PCI IDs of the HNS3 RDMA-capable NICs this driver binds to. */
static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);
/*
 * Pull device configuration from the HNS3 NIC driver's hnae3 handle:
 * register base, netdev, node GUID (derived from the MAC) and the MSI-X
 * vectors shared with the NIC, then select the hip08 hw ops.
 *
 * Returns 0 on success or -ENXIO when the PCI device is not in the
 * supported-ID table.
 */
static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
				  struct hnae3_handle *handle)
{
	const struct pci_device_id *id;
	int i;

	id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev);
	if (!id) {
		dev_err(hr_dev->dev, "device is not compatible!\n");
		return -ENXIO;
	}

	hr_dev->hw = &hns_roce_hw_v2;
	hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
	/* hip08 shares one doorbell region for SQ and other doorbells */
	hr_dev->odb_offset = hr_dev->sdb_offset;

	/* Get info from NIC driver. */
	hr_dev->reg_base = handle->rinfo.roce_io_base;
	hr_dev->caps.num_ports = 1;
	hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
	hr_dev->iboe.phy_port[0] = 0;

	/* Node GUID is the EUI-48 expansion of the netdev MAC address */
	addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid,
			    hr_dev->iboe.netdevs[0]->dev_addr);

	/* RoCE vectors start at base_vector within the NIC's MSI-X range */
	for (i = 0; i < HNS_ROCE_V2_MAX_IRQ_NUM; i++)
		hr_dev->irq[i] = pci_irq_vector(handle->pdev,
						i + handle->rinfo.base_vector);

	/* cmd issue mode: 0 is poll, 1 is event */
	hr_dev->cmd_mod = 1;
	hr_dev->loop_idc = 0;

	return 0;
}
/*
 * hnae3 client init callback: allocate the ib_device + private data,
 * wire it to the hnae3 handle, read the NIC-provided configuration and
 * bring up the RoCE engine.  Unwinds with goto-cleanup on failure.
 *
 * Returns 0 on success or a negative errno.
 */
static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
{
	struct hns_roce_dev *hr_dev;
	int ret;

	hr_dev = (struct hns_roce_dev *)ib_alloc_device(sizeof(*hr_dev));
	if (!hr_dev)
		return -ENOMEM;

	hr_dev->priv = kzalloc(sizeof(struct hns_roce_v2_priv), GFP_KERNEL);
	if (!hr_dev->priv) {
		ret = -ENOMEM;
		goto error_failed_kzalloc;
	}

	hr_dev->pci_dev = handle->pdev;
	hr_dev->dev = &handle->pdev->dev;
	/* Back-pointer used by the reset-notify callbacks */
	handle->priv = hr_dev;

	ret = hns_roce_hw_v2_get_cfg(hr_dev, handle);
	if (ret) {
		dev_err(hr_dev->dev, "Get Configuration failed!\n");
		goto error_failed_get_cfg;
	}

	ret = hns_roce_init(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "RoCE Engine init failed!\n");
		goto error_failed_get_cfg;
	}

	return 0;

error_failed_get_cfg:
	kfree(hr_dev->priv);

error_failed_kzalloc:
	ib_dealloc_device(&hr_dev->ib_dev);

	return ret;
}
/*
 * hnae3 client uninit callback: tear down the RoCE engine and release
 * the device allocated by init_instance.  Tolerates a NULL handle->priv
 * (init may have failed or reset may have cleared it).
 */
static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
					   bool reset)
{
	struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;

	if (!hr_dev)
		return;

	hns_roce_exit(hr_dev);
	kfree(hr_dev->priv);
	ib_dealloc_device(&hr_dev->ib_dev);
}
/*
 * HNAE3_DOWN_CLIENT reset stage: mark the device inactive/in-reset and
 * broadcast IB_EVENT_DEVICE_FATAL so ULPs stop using it before hardware
 * goes away.
 */
static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
{
	struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
	struct ib_event event;

	if (!hr_dev) {
		dev_err(&handle->pdev->dev,
			"Input parameter handle->priv is NULL!\n");
		return -EINVAL;
	}

	/* Reject new verbs work while the reset is in flight */
	hr_dev->active = false;
	hr_dev->is_reset = true;

	event.event = IB_EVENT_DEVICE_FATAL;
	event.device = &hr_dev->ib_dev;
	event.element.port_num = 1;
	ib_dispatch_event(&event);

	return 0;
}
/*
 * HNAE3_INIT_CLIENT reset stage: re-create the RoCE instance after the
 * NIC has come back up.
 */
static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
{
	int ret;

	ret = hns_roce_hw_v2_init_instance(handle);
	if (ret) {
		/* when reset notify type is HNAE3_INIT_CLIENT In reset notify
		 * callback function, RoCE Engine reinitialize. If RoCE reinit
		 * failed, we should inform NIC driver.
		 */
		handle->priv = NULL;
		dev_err(&handle->pdev->dev,
			"In reset process RoCE reinit failed %d.\n", ret);
	}

	return ret;
}
/*
 * HNAE3_UNINIT_CLIENT reset stage: destroy the RoCE instance before the
 * NIC resets the hardware (reset=false: no full hardware teardown).
 */
static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle)
{
	hns_roce_hw_v2_uninit_instance(handle, false);
	return 0;
}
*handle
,
5939 enum hnae3_reset_notify_type type
)
5944 case HNAE3_DOWN_CLIENT
:
5945 ret
= hns_roce_hw_v2_reset_notify_down(handle
);
5947 case HNAE3_INIT_CLIENT
:
5948 ret
= hns_roce_hw_v2_reset_notify_init(handle
);
5950 case HNAE3_UNINIT_CLIENT
:
5951 ret
= hns_roce_hw_v2_reset_notify_uninit(handle
);
5960 static const struct hnae3_client_ops hns_roce_hw_v2_ops
= {
5961 .init_instance
= hns_roce_hw_v2_init_instance
,
5962 .uninit_instance
= hns_roce_hw_v2_uninit_instance
,
5963 .reset_notify
= hns_roce_hw_v2_reset_notify
,
/* Registration record handed to the hnae3 framework at module load. */
static struct hnae3_client hns_roce_hw_v2_client = {
	.name = "hns_roce_hw_v2",
	.type = HNAE3_CLIENT_ROCE,
	.ops = &hns_roce_hw_v2_ops,
};
/* Module entry: register as a RoCE client with the hnae3 framework. */
static int __init hns_roce_hw_v2_init(void)
{
	return hnae3_register_client(&hns_roce_hw_v2_client);
}
/* Module exit: unregister the hnae3 RoCE client. */
static void __exit hns_roce_hw_v2_exit(void)
{
	hnae3_unregister_client(&hns_roce_hw_v2_client);
}
/* Module registration and metadata. */
module_init(hns_roce_hw_v2_init);
module_exit(hns_roce_hw_v2_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_AUTHOR("Shaobo Xu <xushaobo2@huawei.com>");
MODULE_DESCRIPTION("Hisilicon Hip08 Family RoCE Driver");