]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blobdiff - drivers/infiniband/hw/hns/hns_roce_hw_v2.c
RDMA/hns: Add a return statement when configuring the global param fails
[mirror_ubuntu-bionic-kernel.git] / drivers / infiniband / hw / hns / hns_roce_hw_v2.c
index 8e18445714a96db307d6e4cdda09117333c567ae..82afc5b47a2acfff58e916e14de99f96b5fa88c9 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/etherdevice.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
+#include <net/addrconf.h>
 #include <rdma/ib_umem.h>
 
 #include "hnae3.h"
@@ -51,32 +52,113 @@ static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
        dseg->len  = cpu_to_le32(sg->length);
 }
 
+/* Fill the data portion of an RC send WQE: either copy the SG list as
+ * inline data, or build data segments (spilling SGEs past the first two
+ * into the extended SGE area). On failure, *bad_wr is set to the
+ * offending WR and -EINVAL is returned.
+ */
+static int set_rwqe_data_seg(struct ib_qp *ibqp, struct ib_send_wr *wr,
+                            struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
+                            void *wqe, unsigned int *sge_ind,
+                            struct ib_send_wr **bad_wr)
+{
+       struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+       struct hns_roce_v2_wqe_data_seg *dseg = wqe;
+       struct hns_roce_qp *qp = to_hr_qp(ibqp);
+       int i;
+
+       if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
+               if (le32_to_cpu(rc_sq_wqe->msg_len) >
+                   hr_dev->caps.max_sq_inline) {
+                       *bad_wr = wr;
+                       /* msg_len is __le32 in the WQE; convert before
+                        * printing. Range bound comes first in the format.
+                        */
+                       dev_err(hr_dev->dev, "inline len(1-%d)=%d, illegal",
+                               hr_dev->caps.max_sq_inline,
+                               le32_to_cpu(rc_sq_wqe->msg_len));
+                       return -EINVAL;
+               }
+
+               if (wr->opcode == IB_WR_RDMA_READ) {
+                       /* Report the failing WR, matching the error
+                        * contract of the other failure path above.
+                        */
+                       *bad_wr = wr;
+                       dev_err(hr_dev->dev, "Not support inline data!\n");
+                       return -EINVAL;
+               }
+
+               for (i = 0; i < wr->num_sge; i++) {
+                       memcpy(wqe, ((void *)wr->sg_list[i].addr),
+                              wr->sg_list[i].length);
+                       wqe += wr->sg_list[i].length;
+               }
+
+               roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
+                            1);
+       } else {
+               if (wr->num_sge <= 2) {
+                       for (i = 0; i < wr->num_sge; i++) {
+                               if (likely(wr->sg_list[i].length)) {
+                                       set_data_seg_v2(dseg, wr->sg_list + i);
+                                       dseg++;
+                               }
+                       }
+               } else {
+                       /* More than two SGEs: first two stay in the WQE,
+                        * the rest go to the extended SGE space.
+                        */
+                       roce_set_field(rc_sq_wqe->byte_20,
+                                    V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
+                                    V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
+                                    (*sge_ind) & (qp->sge.sge_cnt - 1));
+
+                       for (i = 0; i < 2; i++) {
+                               if (likely(wr->sg_list[i].length)) {
+                                       set_data_seg_v2(dseg, wr->sg_list + i);
+                                       dseg++;
+                               }
+                       }
+
+                       dseg = get_send_extend_sge(qp,
+                                           (*sge_ind) & (qp->sge.sge_cnt - 1));
+
+                       for (i = 0; i < wr->num_sge - 2; i++) {
+                               if (likely(wr->sg_list[i + 2].length)) {
+                                       set_data_seg_v2(dseg,
+                                                       wr->sg_list + 2 + i);
+                                       dseg++;
+                                       (*sge_ind)++;
+                               }
+                       }
+               }
+
+               roce_set_field(rc_sq_wqe->byte_16,
+                              V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
+                              V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, wr->num_sge);
+       }
+
+       return 0;
+}
+
 static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                                 struct ib_send_wr **bad_wr)
 {
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+       struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
+       struct hns_roce_v2_ud_send_wqe *ud_sq_wqe;
        struct hns_roce_v2_rc_send_wqe *rc_sq_wqe;
        struct hns_roce_qp *qp = to_hr_qp(ibqp);
        struct hns_roce_v2_wqe_data_seg *dseg;
        struct device *dev = hr_dev->dev;
        struct hns_roce_v2_db sq_db;
        unsigned int sge_ind = 0;
-       unsigned int wqe_sz = 0;
        unsigned int owner_bit;
        unsigned long flags;
        unsigned int ind;
        void *wqe = NULL;
+       u32 tmp_len = 0;
+       bool loopback;
        int ret = 0;
+       u8 *smac;
        int nreq;
        int i;
 
-       if (unlikely(ibqp->qp_type != IB_QPT_RC)) {
+       if (unlikely(ibqp->qp_type != IB_QPT_RC &&
+                    ibqp->qp_type != IB_QPT_GSI &&
+                    ibqp->qp_type != IB_QPT_UD)) {
                dev_err(dev, "Not supported QP(0x%x)type!\n", ibqp->qp_type);
-               *bad_wr = NULL;
+               *bad_wr = wr;
                return -EOPNOTSUPP;
        }
 
-       if (unlikely(qp->state != IB_QPS_RTS && qp->state != IB_QPS_SQD)) {
+       if (unlikely(qp->state == IB_QPS_RESET || qp->state == IB_QPS_INIT ||
+                    qp->state == IB_QPS_RTR)) {
                dev_err(dev, "Post WQE fail, QP state %d err!\n", qp->state);
                *bad_wr = wr;
                return -EINVAL;
@@ -105,162 +187,284 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
                                                                      wr->wr_id;
 
-               owner_bit = ~(qp->sq.head >> ilog2(qp->sq.wqe_cnt)) & 0x1;
-               rc_sq_wqe = wqe;
-               memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
-               for (i = 0; i < wr->num_sge; i++)
-                       rc_sq_wqe->msg_len += wr->sg_list[i].length;
+               owner_bit =
+                      ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
+
+               /* Corresponding to the QP type, wqe process separately */
+               if (ibqp->qp_type == IB_QPT_GSI) {
+                       ud_sq_wqe = wqe;
+                       memset(ud_sq_wqe, 0, sizeof(*ud_sq_wqe));
+
+                       roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_0_M,
+                                      V2_UD_SEND_WQE_DMAC_0_S, ah->av.mac[0]);
+                       roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_1_M,
+                                      V2_UD_SEND_WQE_DMAC_1_S, ah->av.mac[1]);
+                       roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_2_M,
+                                      V2_UD_SEND_WQE_DMAC_2_S, ah->av.mac[2]);
+                       roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_3_M,
+                                      V2_UD_SEND_WQE_DMAC_3_S, ah->av.mac[3]);
+                       roce_set_field(ud_sq_wqe->byte_48,
+                                      V2_UD_SEND_WQE_BYTE_48_DMAC_4_M,
+                                      V2_UD_SEND_WQE_BYTE_48_DMAC_4_S,
+                                      ah->av.mac[4]);
+                       roce_set_field(ud_sq_wqe->byte_48,
+                                      V2_UD_SEND_WQE_BYTE_48_DMAC_5_M,
+                                      V2_UD_SEND_WQE_BYTE_48_DMAC_5_S,
+                                      ah->av.mac[5]);
+
+                       /* MAC loopback */
+                       smac = (u8 *)hr_dev->dev_addr[qp->port];
+                       loopback = ether_addr_equal_unaligned(ah->av.mac,
+                                                             smac) ? 1 : 0;
+
+                       roce_set_bit(ud_sq_wqe->byte_40,
+                                    V2_UD_SEND_WQE_BYTE_40_LBI_S, loopback);
+
+                       roce_set_field(ud_sq_wqe->byte_4,
+                                      V2_UD_SEND_WQE_BYTE_4_OPCODE_M,
+                                      V2_UD_SEND_WQE_BYTE_4_OPCODE_S,
+                                      HNS_ROCE_V2_WQE_OP_SEND);
+
+                       for (i = 0; i < wr->num_sge; i++)
+                               tmp_len += wr->sg_list[i].length;
 
-               rc_sq_wqe->inv_key_immtdata = send_ieth(wr);
+                       ud_sq_wqe->msg_len =
+                        cpu_to_le32(le32_to_cpu(ud_sq_wqe->msg_len) + tmp_len);
+
+                       switch (wr->opcode) {
+                       case IB_WR_SEND_WITH_IMM:
+                       case IB_WR_RDMA_WRITE_WITH_IMM:
+                               ud_sq_wqe->immtdata = wr->ex.imm_data;
+                               break;
+                       default:
+                               ud_sq_wqe->immtdata = 0;
+                               break;
+                       }
 
-               roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_FENCE_S,
-                           (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);
+                       /* Set sig attr */
+                       roce_set_bit(ud_sq_wqe->byte_4,
+                                  V2_UD_SEND_WQE_BYTE_4_CQE_S,
+                                  (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
 
-               roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SE_S,
-                           (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
+                       /* Set se attr */
+                       roce_set_bit(ud_sq_wqe->byte_4,
+                                 V2_UD_SEND_WQE_BYTE_4_SE_S,
+                                 (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
 
-               roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_CQE_S,
-                           (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
+                       roce_set_bit(ud_sq_wqe->byte_4,
+                                    V2_UD_SEND_WQE_BYTE_4_OWNER_S, owner_bit);
 
-               roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OWNER_S,
-                            owner_bit);
+                       roce_set_field(ud_sq_wqe->byte_16,
+                                      V2_UD_SEND_WQE_BYTE_16_PD_M,
+                                      V2_UD_SEND_WQE_BYTE_16_PD_S,
+                                      to_hr_pd(ibqp->pd)->pdn);
 
-               switch (wr->opcode) {
-               case IB_WR_RDMA_READ:
-                       roce_set_field(rc_sq_wqe->byte_4,
-                                      V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
-                                      V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
-                                      HNS_ROCE_V2_WQE_OP_RDMA_READ);
-                       rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
-                       rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
-                       break;
-               case IB_WR_RDMA_WRITE:
-                       roce_set_field(rc_sq_wqe->byte_4,
-                                      V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
-                                      V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
-                                      HNS_ROCE_V2_WQE_OP_RDMA_WRITE);
-                       rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
-                       rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
-                       break;
-               case IB_WR_RDMA_WRITE_WITH_IMM:
-                       roce_set_field(rc_sq_wqe->byte_4,
+                       roce_set_field(ud_sq_wqe->byte_16,
+                                      V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M,
+                                      V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S,
+                                      wr->num_sge);
+
+                       roce_set_field(ud_sq_wqe->byte_20,
+                                    V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
+                                    V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
+                                    sge_ind & (qp->sge.sge_cnt - 1));
+
+                       roce_set_field(ud_sq_wqe->byte_24,
+                                      V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
+                                      V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, 0);
+                       ud_sq_wqe->qkey =
+                            cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
+                            qp->qkey : ud_wr(wr)->remote_qkey);
+                       roce_set_field(ud_sq_wqe->byte_32,
+                                      V2_UD_SEND_WQE_BYTE_32_DQPN_M,
+                                      V2_UD_SEND_WQE_BYTE_32_DQPN_S,
+                                      ud_wr(wr)->remote_qpn);
+
+                       roce_set_field(ud_sq_wqe->byte_36,
+                                      V2_UD_SEND_WQE_BYTE_36_VLAN_M,
+                                      V2_UD_SEND_WQE_BYTE_36_VLAN_S,
+                                      le16_to_cpu(ah->av.vlan));
+                       roce_set_field(ud_sq_wqe->byte_36,
+                                      V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
+                                      V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S,
+                                      ah->av.hop_limit);
+                       roce_set_field(ud_sq_wqe->byte_36,
+                                      V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
+                                      V2_UD_SEND_WQE_BYTE_36_TCLASS_S,
+                                      0);
+                       roce_set_field(ud_sq_wqe->byte_36,
+                                      V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
+                                      V2_UD_SEND_WQE_BYTE_36_TCLASS_S,
+                                      0);
+                       roce_set_field(ud_sq_wqe->byte_40,
+                                      V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M,
+                                      V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S, 0);
+                       roce_set_field(ud_sq_wqe->byte_40,
+                                      V2_UD_SEND_WQE_BYTE_40_SL_M,
+                                      V2_UD_SEND_WQE_BYTE_40_SL_S,
+                                     le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
+                                     HNS_ROCE_SL_SHIFT);
+                       roce_set_field(ud_sq_wqe->byte_40,
+                                      V2_UD_SEND_WQE_BYTE_40_PORTN_M,
+                                      V2_UD_SEND_WQE_BYTE_40_PORTN_S,
+                                      qp->port);
+
+                       roce_set_field(ud_sq_wqe->byte_48,
+                                      V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M,
+                                      V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S,
+                                      hns_get_gid_index(hr_dev, qp->phy_port,
+                                                        ah->av.gid_index));
+
+                       memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0],
+                              GID_LEN_V2);
+
+                       dseg = get_send_extend_sge(qp,
+                                           sge_ind & (qp->sge.sge_cnt - 1));
+                       for (i = 0; i < wr->num_sge; i++) {
+                               set_data_seg_v2(dseg + i, wr->sg_list + i);
+                               sge_ind++;
+                       }
+
+                       ind++;
+               } else if (ibqp->qp_type == IB_QPT_RC) {
+                       rc_sq_wqe = wqe;
+                       memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
+                       for (i = 0; i < wr->num_sge; i++)
+                               tmp_len += wr->sg_list[i].length;
+
+                       rc_sq_wqe->msg_len =
+                        cpu_to_le32(le32_to_cpu(rc_sq_wqe->msg_len) + tmp_len);
+
+                       switch (wr->opcode) {
+                       case IB_WR_SEND_WITH_IMM:
+                       case IB_WR_RDMA_WRITE_WITH_IMM:
+                               rc_sq_wqe->immtdata = wr->ex.imm_data;
+                               break;
+                       case IB_WR_SEND_WITH_INV:
+                               rc_sq_wqe->inv_key =
+                                       cpu_to_le32(wr->ex.invalidate_rkey);
+                               break;
+                       default:
+                               rc_sq_wqe->immtdata = 0;
+                               break;
+                       }
+
+                       roce_set_bit(rc_sq_wqe->byte_4,
+                                    V2_RC_SEND_WQE_BYTE_4_FENCE_S,
+                                    (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);
+
+                       roce_set_bit(rc_sq_wqe->byte_4,
+                                 V2_RC_SEND_WQE_BYTE_4_SE_S,
+                                 (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
+
+                       roce_set_bit(rc_sq_wqe->byte_4,
+                                  V2_RC_SEND_WQE_BYTE_4_CQE_S,
+                                  (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
+
+                       roce_set_bit(rc_sq_wqe->byte_4,
+                                    V2_RC_SEND_WQE_BYTE_4_OWNER_S, owner_bit);
+
+                       switch (wr->opcode) {
+                       case IB_WR_RDMA_READ:
+                               roce_set_field(rc_sq_wqe->byte_4,
+                                              V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+                                              V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+                                              HNS_ROCE_V2_WQE_OP_RDMA_READ);
+                               rc_sq_wqe->rkey =
+                                       cpu_to_le32(rdma_wr(wr)->rkey);
+                               rc_sq_wqe->va =
+                                       cpu_to_le64(rdma_wr(wr)->remote_addr);
+                               break;
+                       case IB_WR_RDMA_WRITE:
+                               roce_set_field(rc_sq_wqe->byte_4,
+                                              V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+                                              V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+                                              HNS_ROCE_V2_WQE_OP_RDMA_WRITE);
+                               rc_sq_wqe->rkey =
+                                       cpu_to_le32(rdma_wr(wr)->rkey);
+                               rc_sq_wqe->va =
+                                       cpu_to_le64(rdma_wr(wr)->remote_addr);
+                               break;
+                       case IB_WR_RDMA_WRITE_WITH_IMM:
+                               roce_set_field(rc_sq_wqe->byte_4,
                                       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
                                       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
                                       HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM);
-                       rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
-                       rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
-                       break;
-               case IB_WR_SEND:
-                       roce_set_field(rc_sq_wqe->byte_4,
-                                      V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
-                                      V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
-                                      HNS_ROCE_V2_WQE_OP_SEND);
-                       break;
-               case IB_WR_SEND_WITH_INV:
-                       roce_set_field(rc_sq_wqe->byte_4,
+                               rc_sq_wqe->rkey =
+                                       cpu_to_le32(rdma_wr(wr)->rkey);
+                               rc_sq_wqe->va =
+                                       cpu_to_le64(rdma_wr(wr)->remote_addr);
+                               break;
+                       case IB_WR_SEND:
+                               roce_set_field(rc_sq_wqe->byte_4,
+                                              V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+                                              V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+                                              HNS_ROCE_V2_WQE_OP_SEND);
+                               break;
+                       case IB_WR_SEND_WITH_INV:
+                               roce_set_field(rc_sq_wqe->byte_4,
                                       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
                                       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
                                       HNS_ROCE_V2_WQE_OP_SEND_WITH_INV);
-                       break;
-               case IB_WR_SEND_WITH_IMM:
-                       roce_set_field(rc_sq_wqe->byte_4,
-                                      V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
-                                      V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
-                                      HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM);
-                       break;
-               case IB_WR_LOCAL_INV:
-                       roce_set_field(rc_sq_wqe->byte_4,
-                                      V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
-                                      V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
-                                      HNS_ROCE_V2_WQE_OP_LOCAL_INV);
-                       break;
-               case IB_WR_ATOMIC_CMP_AND_SWP:
-                       roce_set_field(rc_sq_wqe->byte_4,
-                                      V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
-                                      V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
-                                      HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP);
-                       break;
-               case IB_WR_ATOMIC_FETCH_AND_ADD:
-                       roce_set_field(rc_sq_wqe->byte_4,
-                                      V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
-                                      V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
-                                      HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD);
-                       break;
-               case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
-                       roce_set_field(rc_sq_wqe->byte_4,
+                               break;
+                       case IB_WR_SEND_WITH_IMM:
+                               roce_set_field(rc_sq_wqe->byte_4,
+                                             V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+                                             V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+                                             HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM);
+                               break;
+                       case IB_WR_LOCAL_INV:
+                               roce_set_field(rc_sq_wqe->byte_4,
+                                              V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+                                              V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+                                              HNS_ROCE_V2_WQE_OP_LOCAL_INV);
+                               break;
+                       case IB_WR_ATOMIC_CMP_AND_SWP:
+                               roce_set_field(rc_sq_wqe->byte_4,
+                                         V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+                                         V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+                                         HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP);
+                               break;
+                       case IB_WR_ATOMIC_FETCH_AND_ADD:
+                               roce_set_field(rc_sq_wqe->byte_4,
+                                        V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+                                        V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+                                        HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD);
+                               break;
+                       case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
+                               roce_set_field(rc_sq_wqe->byte_4,
                                      V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
                                      V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
                                      HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP);
-                       break;
-               case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
-                       roce_set_field(rc_sq_wqe->byte_4,
+                               break;
+                       case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
+                               roce_set_field(rc_sq_wqe->byte_4,
                                     V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
                                     V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
                                     HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD);
-                       break;
-               default:
-                       roce_set_field(rc_sq_wqe->byte_4,
-                                      V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
-                                      V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
-                                      HNS_ROCE_V2_WQE_OP_MASK);
-                       break;
-               }
-
-               wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
-               dseg = wqe;
-               if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
-                       if (rc_sq_wqe->msg_len >
-                               hr_dev->caps.max_sq_inline) {
-                               ret = -EINVAL;
-                               *bad_wr = wr;
-                               dev_err(dev, "inline len(1-%d)=%d, illegal",
-                                       rc_sq_wqe->msg_len,
-                                       hr_dev->caps.max_sq_inline);
-                               goto out;
+                               break;
+                       default:
+                               roce_set_field(rc_sq_wqe->byte_4,
+                                              V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+                                              V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+                                              HNS_ROCE_V2_WQE_OP_MASK);
+                               break;
                        }
 
-                       for (i = 0; i < wr->num_sge; i++) {
-                               memcpy(wqe, ((void *)wr->sg_list[i].addr),
-                                      wr->sg_list[i].length);
-                               wqe += wr->sg_list[i].length;
-                               wqe_sz += wr->sg_list[i].length;
-                       }
+                       wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
+                       dseg = wqe;
 
-                       roce_set_bit(rc_sq_wqe->byte_4,
-                                    V2_RC_SEND_WQE_BYTE_4_INLINE_S, 1);
+                       ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe, wqe,
+                                               &sge_ind, bad_wr);
+                       if (ret)
+                               goto out;
+                       ind++;
                } else {
-                       if (wr->num_sge <= 2) {
-                               for (i = 0; i < wr->num_sge; i++)
-                                       set_data_seg_v2(dseg + i,
-                                                       wr->sg_list + i);
-                       } else {
-                               roce_set_field(rc_sq_wqe->byte_20,
-                               V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
-                               V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
-                               sge_ind & (qp->sge.sge_cnt - 1));
-
-                               for (i = 0; i < 2; i++)
-                                       set_data_seg_v2(dseg + i,
-                                                       wr->sg_list + i);
-
-                               dseg = get_send_extend_sge(qp,
-                                       sge_ind & (qp->sge.sge_cnt - 1));
-
-                               for (i = 0; i < wr->num_sge - 2; i++) {
-                                       set_data_seg_v2(dseg + i,
-                                                       wr->sg_list + 2 + i);
-                                       sge_ind++;
-                               }
-                       }
-
-                       roce_set_field(rc_sq_wqe->byte_16,
-                                      V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
-                                      V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S,
-                                      wr->num_sge);
-                       wqe_sz += wr->num_sge *
-                                 sizeof(struct hns_roce_v2_wqe_data_seg);
+                       dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type);
+                       spin_unlock_irqrestore(&qp->sq.lock, flags);
+                       *bad_wr = wr;
+                       return -EOPNOTSUPP;
                }
-               ind++;
        }
 
 out:
@@ -282,7 +486,7 @@ out:
                roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
                               V2_DB_PARAMETER_SL_S, qp->sl);
 
-               hns_roce_write64_k((__be32 *)&sq_db, qp->sq.db_reg_l);
+               hns_roce_write64_k((__le32 *)&sq_db, qp->sq.db_reg_l);
 
                qp->sq_next_wqe = ind;
                qp->next_sge = sge_ind;
@@ -299,8 +503,8 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
        struct hns_roce_v2_wqe_data_seg *dseg;
+       struct hns_roce_rinl_sge *sge_list;
        struct device *dev = hr_dev->dev;
-       struct hns_roce_v2_db rq_db;
        unsigned long flags;
        void *wqe = NULL;
        int ret = 0;
@@ -343,10 +547,22 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                }
 
                if (i < hr_qp->rq.max_gs) {
-                       dseg[i].lkey = cpu_to_be32(HNS_ROCE_INVALID_LKEY);
+                       dseg[i].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
                        dseg[i].addr = 0;
                }
 
+               /* rq support inline data */
+               if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
+                       sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list;
+                       hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt =
+                                                              (u32)wr->num_sge;
+                       for (i = 0; i < wr->num_sge; i++) {
+                               sge_list[i].addr =
+                                              (void *)(u64)wr->sg_list[i].addr;
+                               sge_list[i].len = wr->sg_list[i].length;
+                       }
+               }
+
                hr_qp->rq.wrid[ind] = wr->wr_id;
 
                ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
@@ -358,17 +574,7 @@ out:
                /* Memory barrier */
                wmb();
 
-               rq_db.byte_4 = 0;
-               rq_db.parameter = 0;
-
-               roce_set_field(rq_db.byte_4, V2_DB_BYTE_4_TAG_M,
-                              V2_DB_BYTE_4_TAG_S, hr_qp->qpn);
-               roce_set_field(rq_db.byte_4, V2_DB_BYTE_4_CMD_M,
-                              V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_RQ_DB);
-               roce_set_field(rq_db.parameter, V2_DB_PARAMETER_CONS_IDX_M,
-                              V2_DB_PARAMETER_CONS_IDX_S, hr_qp->rq.head);
-
-               hns_roce_write64_k((__be32 *)&rq_db, hr_qp->rq.db_reg_l);
+               *hr_qp->rdb.db_record = hr_qp->rq.head & 0xffff;
        }
        spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
 
@@ -879,6 +1085,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
        if (ret) {
                dev_err(hr_dev->dev, "Configure global param fail, ret = %d.\n",
                        ret);
+               return ret;
        }
 
        /* Get pf resource owned by every pf */
@@ -908,9 +1115,9 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
        caps->max_sq_inline     = HNS_ROCE_V2_MAX_SQ_INLINE;
        caps->num_uars          = HNS_ROCE_V2_UAR_NUM;
        caps->phy_num_uars      = HNS_ROCE_V2_PHY_UAR_NUM;
-       caps->num_aeq_vectors   = 1;
-       caps->num_comp_vectors  = 63;
-       caps->num_other_vectors = 0;
+       caps->num_aeq_vectors   = HNS_ROCE_V2_AEQE_VEC_NUM;
+       caps->num_comp_vectors  = HNS_ROCE_V2_COMP_VEC_NUM;
+       caps->num_other_vectors = HNS_ROCE_V2_ABNORMAL_VEC_NUM;
        caps->num_mtpts         = HNS_ROCE_V2_MAX_MTPT_NUM;
        caps->num_mtt_segs      = HNS_ROCE_V2_MAX_MTT_SEGS;
        caps->num_cqe_segs      = HNS_ROCE_V2_MAX_CQE_SEGS;
@@ -955,12 +1162,19 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
        caps->cqe_ba_pg_sz      = 0;
        caps->cqe_buf_pg_sz     = 0;
        caps->cqe_hop_num       = HNS_ROCE_CQE_HOP_NUM;
+       caps->eqe_ba_pg_sz      = 0;
+       caps->eqe_buf_pg_sz     = 0;
+       caps->eqe_hop_num       = HNS_ROCE_EQE_HOP_NUM;
        caps->chunk_sz          = HNS_ROCE_V2_TABLE_CHUNK_SIZE;
 
        caps->flags             = HNS_ROCE_CAP_FLAG_REREG_MR |
-                                 HNS_ROCE_CAP_FLAG_ROCE_V1_V2;
+                                 HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
+                                 HNS_ROCE_CAP_FLAG_RQ_INLINE |
+                                 HNS_ROCE_CAP_FLAG_RECORD_DB;
        caps->pkey_table_len[0] = 1;
        caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
+       caps->ceqe_depth        = HNS_ROCE_V2_COMP_EQE_NUM;
+       caps->aeqe_depth        = HNS_ROCE_V2_ASYNC_EQE_NUM;
        caps->local_ca_ack_delay = 0;
        caps->max_mtu = IB_MTU_4096;
 
@@ -1016,14 +1230,14 @@ static int hns_roce_v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
        roce_set_field(val1, HNS_ROCE_VF_MB5_TOKEN_MASK,
                       HNS_ROCE_VF_MB5_TOKEN_SHIFT, token);
 
-       __raw_writeq(cpu_to_le64(in_param), hcr + 0);
-       __raw_writeq(cpu_to_le64(out_param), hcr + 2);
+       writeq(in_param, hcr + 0);
+       writeq(out_param, hcr + 2);
 
        /* Memory barrier */
        wmb();
 
-       __raw_writel(cpu_to_le32(val0), hcr + 4);
-       __raw_writel(cpu_to_le32(val1), hcr + 5);
+       writel(val0, hcr + 4);
+       writel(val1, hcr + 5);
 
        mmiowb();
 
@@ -1295,24 +1509,7 @@ static struct hns_roce_v2_cqe *next_cqe_sw_v2(struct hns_roce_cq *hr_cq)
 
 static void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
 {
-       struct hns_roce_v2_cq_db cq_db;
-
-       cq_db.byte_4 = 0;
-       cq_db.parameter = 0;
-
-       roce_set_field(cq_db.byte_4, V2_CQ_DB_BYTE_4_TAG_M,
-                      V2_CQ_DB_BYTE_4_TAG_S, hr_cq->cqn);
-       roce_set_field(cq_db.byte_4, V2_CQ_DB_BYTE_4_CMD_M,
-                      V2_CQ_DB_BYTE_4_CMD_S, HNS_ROCE_V2_CQ_DB_PTR);
-
-       roce_set_field(cq_db.parameter, V2_CQ_DB_PARAMETER_CONS_IDX_M,
-                      V2_CQ_DB_PARAMETER_CONS_IDX_S,
-                      cons_index & ((hr_cq->cq_depth << 1) - 1));
-       roce_set_field(cq_db.parameter, V2_CQ_DB_PARAMETER_CMD_SN_M,
-                      V2_CQ_DB_PARAMETER_CMD_SN_S, 1);
-
-       hns_roce_write64_k((__be32 *)&cq_db, hr_cq->cq_db_l);
-
+       *hr_cq->set_ci_db = cons_index & 0xffffff;
 }
 
 static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
@@ -1382,6 +1579,8 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
 
        roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CQ_ST_M,
                       V2_CQC_BYTE_4_CQ_ST_S, V2_CQ_STATE_VALID);
+       roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_ARM_ST_M,
+                      V2_CQC_BYTE_4_ARM_ST_S, REG_NXT_CEQE);
        roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M,
                       V2_CQC_BYTE_4_SHIFT_S, ilog2((unsigned int)nent));
        roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M,
@@ -1422,6 +1621,25 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
 
        roce_set_field(cq_context->byte_40_cqe_ba, V2_CQC_BYTE_40_CQE_BA_M,
                       V2_CQC_BYTE_40_CQE_BA_S, (dma_handle >> (32 + 3)));
+
+       if (hr_cq->db_en)
+               roce_set_bit(cq_context->byte_44_db_record,
+                            V2_CQC_BYTE_44_DB_RECORD_EN_S, 1);
+
+       roce_set_field(cq_context->byte_44_db_record,
+                      V2_CQC_BYTE_44_DB_RECORD_ADDR_M,
+                      V2_CQC_BYTE_44_DB_RECORD_ADDR_S,
+                      ((u32)hr_cq->db.dma) >> 1);
+       cq_context->db_record_addr = hr_cq->db.dma >> 32;
+
+       roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
+                      V2_CQC_BYTE_56_CQ_MAX_CNT_M,
+                      V2_CQC_BYTE_56_CQ_MAX_CNT_S,
+                      HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM);
+       roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
+                      V2_CQC_BYTE_56_CQ_PERIOD_M,
+                      V2_CQC_BYTE_56_CQ_PERIOD_S,
+                      HNS_ROCE_V2_CQ_DEFAULT_INTERVAL);
 }
 
 static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
@@ -1457,6 +1675,40 @@ static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
        return 0;
 }
 
+static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
+                                                   struct hns_roce_qp **cur_qp,
+                                                   struct ib_wc *wc)
+{
+       struct hns_roce_rinl_sge *sge_list;
+       u32 wr_num, wr_cnt, sge_num;
+       u32 sge_cnt, data_len, size;
+       void *wqe_buf;
+
+       wr_num = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_WQE_INDX_M,
+                               V2_CQE_BYTE_4_WQE_INDX_S) & 0xffff;
+       wr_cnt = wr_num & ((*cur_qp)->rq.wqe_cnt - 1);
+
+       sge_list = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sg_list;
+       sge_num = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
+       wqe_buf = get_recv_wqe(*cur_qp, wr_cnt);
+       data_len = wc->byte_len;
+
+       for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
+               size = min(sge_list[sge_cnt].len, data_len);
+               memcpy((void *)sge_list[sge_cnt].addr, wqe_buf, size);
+
+               data_len -= size;
+               wqe_buf += size;
+       }
+
+       if (data_len) {
+               wc->status = IB_WC_LOC_LEN_ERR;
+               return -EAGAIN;
+       }
+
+       return 0;
+}
+
 static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
                                struct hns_roce_qp **cur_qp, struct ib_wc *wc)
 {
@@ -1469,6 +1721,7 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
        u32 opcode;
        u32 status;
        int qpn;
+       int ret;
 
        /* Find cqe according to consumer index */
        cqe = next_cqe_sw_v2(hr_cq);
@@ -1636,7 +1889,7 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
                case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
                        wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
                        wc->wc_flags = IB_WC_WITH_IMM;
-                       wc->ex.imm_data = le32_to_cpu(cqe->rkey_immtdata);
+                       wc->ex.imm_data = cqe->immtdata;
                        break;
                case HNS_ROCE_V2_OPCODE_SEND:
                        wc->opcode = IB_WC_RECV;
@@ -1645,18 +1898,29 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
                case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
                        wc->opcode = IB_WC_RECV;
                        wc->wc_flags = IB_WC_WITH_IMM;
-                       wc->ex.imm_data = le32_to_cpu(cqe->rkey_immtdata);
+                       wc->ex.imm_data = cqe->immtdata;
                        break;
                case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
                        wc->opcode = IB_WC_RECV;
                        wc->wc_flags = IB_WC_WITH_INVALIDATE;
-                       wc->ex.invalidate_rkey = cqe->rkey_immtdata;
+                       wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey);
                        break;
                default:
                        wc->status = IB_WC_GENERAL_ERR;
                        break;
                }
 
+               if ((wc->qp->qp_type == IB_QPT_RC ||
+                    wc->qp->qp_type == IB_QPT_UC) &&
+                   (opcode == HNS_ROCE_V2_OPCODE_SEND ||
+                   opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
+                   opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
+                   (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_RQ_INLINE_S))) {
+                       ret = hns_roce_handle_recv_inl_wqe(cqe, cur_qp, wc);
+                       if (ret)
+                               return -EAGAIN;
+               }
+
                /* Update tail pointer, record wr_id */
                wq = &(*cur_qp)->rq;
                wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
@@ -1670,6 +1934,21 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
                wc->wc_flags |= (roce_get_bit(cqe->byte_32,
                                              V2_CQE_BYTE_32_GRH_S) ?
                                              IB_WC_GRH : 0);
+               wc->port_num = roce_get_field(cqe->byte_32,
+                               V2_CQE_BYTE_32_PORTN_M, V2_CQE_BYTE_32_PORTN_S);
+               wc->pkey_index = 0;
+               memcpy(wc->smac, cqe->smac, 4);
+               wc->smac[4] = roce_get_field(cqe->byte_28,
+                                            V2_CQE_BYTE_28_SMAC_4_M,
+                                            V2_CQE_BYTE_28_SMAC_4_S);
+               wc->smac[5] = roce_get_field(cqe->byte_28,
+                                            V2_CQE_BYTE_28_SMAC_5_M,
+                                            V2_CQE_BYTE_28_SMAC_5_S);
+               wc->vlan_id = 0xffff;
+               wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
+               wc->network_hdr_type = roce_get_field(cqe->byte_28,
+                                                   V2_CQE_BYTE_28_PORT_TYPE_M,
+                                                   V2_CQE_BYTE_28_PORT_TYPE_S);
        }
 
        return 0;
@@ -1859,11 +2138,43 @@ static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
        return ret;
 }
 
+static void set_access_flags(struct hns_roce_qp *hr_qp,
+                            struct hns_roce_v2_qp_context *context,
+                            struct hns_roce_v2_qp_context *qpc_mask,
+                            const struct ib_qp_attr *attr, int attr_mask)
+{
+       u8 dest_rd_atomic;
+       u32 access_flags;
+
+       dest_rd_atomic = (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) ?
+                        attr->max_dest_rd_atomic : hr_qp->resp_depth;
+
+       access_flags = (attr_mask & IB_QP_ACCESS_FLAGS) ?
+                      attr->qp_access_flags : hr_qp->atomic_rd_en;
+
+       if (!dest_rd_atomic)
+               access_flags &= IB_ACCESS_REMOTE_WRITE;
+
+       roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
+                    !!(access_flags & IB_ACCESS_REMOTE_READ));
+       roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0);
+
+       roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
+                    !!(access_flags & IB_ACCESS_REMOTE_WRITE));
+       roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, 0);
+
+       roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
+                    !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
+       roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
+}
+
 static void modify_qp_reset_to_init(struct ib_qp *ibqp,
                                    const struct ib_qp_attr *attr,
+                                   int attr_mask,
                                    struct hns_roce_v2_qp_context *context,
                                    struct hns_roce_v2_qp_context *qpc_mask)
 {
+       struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
 
        /*
@@ -1877,9 +2188,18 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
        roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
                       V2_QPC_BYTE_4_TST_S, 0);
 
-       roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
-                      V2_QPC_BYTE_4_SGE_SHIFT_S, hr_qp->sq.max_gs > 2 ?
-                      ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
+       if (ibqp->qp_type == IB_QPT_GSI)
+               roce_set_field(context->byte_4_sqpn_tst,
+                              V2_QPC_BYTE_4_SGE_SHIFT_M,
+                              V2_QPC_BYTE_4_SGE_SHIFT_S,
+                              ilog2((unsigned int)hr_qp->sge.sge_cnt));
+       else
+               roce_set_field(context->byte_4_sqpn_tst,
+                              V2_QPC_BYTE_4_SGE_SHIFT_M,
+                              V2_QPC_BYTE_4_SGE_SHIFT_S,
+                              hr_qp->sq.max_gs > 2 ?
+                              ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
+
        roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
                       V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
 
@@ -1944,18 +2264,31 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
        roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CNP_TX_FLAG_S, 0);
        roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CE_FLAG_S, 0);
 
-       roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
-                    !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
-       roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0);
-
-       roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
-                    !!(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE));
-       roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, 0);
+       if (attr_mask & IB_QP_QKEY) {
+               context->qkey_xrcd = attr->qkey;
+               qpc_mask->qkey_xrcd = 0;
+               hr_qp->qkey = attr->qkey;
+       }
 
-       roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
-                    !!(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC));
-       roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
+       if (hr_qp->rdb_en) {
+               roce_set_bit(context->byte_68_rq_db,
+                            V2_QPC_BYTE_68_RQ_RECORD_EN_S, 1);
+               roce_set_bit(qpc_mask->byte_68_rq_db,
+                            V2_QPC_BYTE_68_RQ_RECORD_EN_S, 0);
+       }
 
+       roce_set_field(context->byte_68_rq_db,
+                      V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
+                      V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S,
+                      ((u32)hr_qp->rdb.dma) >> 1);
+       roce_set_field(qpc_mask->byte_68_rq_db,
+                      V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
+                      V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S, 0);
+       context->rq_db_record_addr = hr_qp->rdb.dma >> 32;
+       qpc_mask->rq_db_record_addr = 0;
+
+       roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S,
+                   (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) ? 1 : 0);
        roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0);
 
        roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
@@ -2176,9 +2509,17 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
        roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
                       V2_QPC_BYTE_4_TST_S, 0);
 
-       roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
-                      V2_QPC_BYTE_4_SGE_SHIFT_S, hr_qp->sq.max_gs > 2 ?
-                      ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
+       if (ibqp->qp_type == IB_QPT_GSI)
+               roce_set_field(context->byte_4_sqpn_tst,
+                              V2_QPC_BYTE_4_SGE_SHIFT_M,
+                              V2_QPC_BYTE_4_SGE_SHIFT_S,
+                              ilog2((unsigned int)hr_qp->sge.sge_cnt));
+       else
+               roce_set_field(context->byte_4_sqpn_tst,
+                              V2_QPC_BYTE_4_SGE_SHIFT_M,
+                              V2_QPC_BYTE_4_SGE_SHIFT_S, hr_qp->sq.max_gs > 2 ?
+                              ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
+
        roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
                       V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
 
@@ -2239,7 +2580,7 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
                       V2_QPC_BYTE_80_RX_CQN_S, 0);
 
        roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
-                      V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
+                      V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
        roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
                       V2_QPC_BYTE_252_TX_CQN_S, 0);
 
@@ -2255,20 +2596,22 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
                               V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
        }
 
-       if (attr_mask & IB_QP_PKEY_INDEX)
-               context->qkey_xrcd = attr->pkey_index;
-       else
-               context->qkey_xrcd = hr_qp->pkey_index;
+       if (attr_mask & IB_QP_QKEY) {
+               context->qkey_xrcd = attr->qkey;
+               qpc_mask->qkey_xrcd = 0;
+       }
 
        roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
                       V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
        roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
                       V2_QPC_BYTE_4_SQPN_S, 0);
 
-       roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
-                      V2_QPC_BYTE_56_DQPN_S, hr_qp->qpn);
-       roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
-                      V2_QPC_BYTE_56_DQPN_S, 0);
+       if (attr_mask & IB_QP_DEST_QPN) {
+               roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
+                              V2_QPC_BYTE_56_DQPN_S, hr_qp->qpn);
+               roce_set_field(qpc_mask->byte_56_dqpn_err,
+                              V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
+       }
        roce_set_field(context->byte_168_irrl_idx,
                       V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
                       V2_QPC_BYTE_168_SQ_SHIFT_BAK_S,
@@ -2323,8 +2666,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
                return -EINVAL;
        }
 
-       if ((attr_mask & IB_QP_ALT_PATH) || (attr_mask & IB_QP_ACCESS_FLAGS) ||
-           (attr_mask & IB_QP_PKEY_INDEX) || (attr_mask & IB_QP_QKEY)) {
+       if (attr_mask & IB_QP_ALT_PATH) {
                dev_err(dev, "INIT2RTR attr_mask (0x%x) error\n", attr_mask);
                return -EINVAL;
        }
@@ -2354,7 +2696,8 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
        roce_set_field(context->byte_20_smac_sgid_idx,
                       V2_QPC_BYTE_20_SGE_HOP_NUM_M,
                       V2_QPC_BYTE_20_SGE_HOP_NUM_S,
-                      hr_qp->sq.max_gs > 2 ? hr_dev->caps.mtt_hop_num : 0);
+                      ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
+                      hr_dev->caps.mtt_hop_num : 0);
        roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
                       V2_QPC_BYTE_20_SGE_HOP_NUM_M,
                       V2_QPC_BYTE_20_SGE_HOP_NUM_S, 0);
@@ -2463,16 +2806,21 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
                roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0);
        }
 
-       roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
-                      V2_QPC_BYTE_140_RR_MAX_S,
-                      ilog2((unsigned int)attr->max_dest_rd_atomic));
-       roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
-                      V2_QPC_BYTE_140_RR_MAX_S, 0);
+       if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
+            attr->max_dest_rd_atomic) {
+               roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
+                              V2_QPC_BYTE_140_RR_MAX_S,
+                              fls(attr->max_dest_rd_atomic - 1));
+               roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
+                              V2_QPC_BYTE_140_RR_MAX_S, 0);
+       }
 
-       roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
-                      V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
-       roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
-                      V2_QPC_BYTE_56_DQPN_S, 0);
+       if (attr_mask & IB_QP_DEST_QPN) {
+               roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
+                              V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
+               roce_set_field(qpc_mask->byte_56_dqpn_err,
+                              V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
+       }
 
        /* Configure GID index */
        port_num = rdma_ah_get_port_num(&attr->ah_attr);
@@ -2511,8 +2859,13 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
        roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
                       V2_QPC_BYTE_24_TC_S, 0);
 
-       roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
-                      V2_QPC_BYTE_24_MTU_S, attr->path_mtu);
+       if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
+               roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
+                              V2_QPC_BYTE_24_MTU_S, IB_MTU_4096);
+       else if (attr_mask & IB_QP_PATH_MTU)
+               roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
+                              V2_QPC_BYTE_24_MTU_S, attr->path_mtu);
+
        roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
                       V2_QPC_BYTE_24_MTU_S, 0);
 
@@ -2557,12 +2910,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
                       V2_QPC_BYTE_168_LP_SGEN_INI_M,
                       V2_QPC_BYTE_168_LP_SGEN_INI_S, 0);
 
-       roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
-                      V2_QPC_BYTE_208_SR_MAX_S,
-                      ilog2((unsigned int)attr->max_rd_atomic));
-       roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
-                      V2_QPC_BYTE_208_SR_MAX_S, 0);
-
        roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
                       V2_QPC_BYTE_28_SL_S, rdma_ah_get_sl(&attr->ah_attr));
        roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
@@ -2592,11 +2939,9 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
                return -EINVAL;
        }
 
-       /* If exist optional param, return error */
-       if ((attr_mask & IB_QP_ALT_PATH) || (attr_mask & IB_QP_ACCESS_FLAGS) ||
-           (attr_mask & IB_QP_QKEY) || (attr_mask & IB_QP_PATH_MIG_STATE) ||
-           (attr_mask & IB_QP_CUR_STATE) ||
-           (attr_mask & IB_QP_MIN_RNR_TIMER)) {
+       /* Not support alternate path and path migration */
+       if ((attr_mask & IB_QP_ALT_PATH) ||
+           (attr_mask & IB_QP_PATH_MIG_STATE)) {
                dev_err(dev, "RTR2RTS attr_mask (0x%x)error\n", attr_mask);
                return -EINVAL;
        }
@@ -2625,13 +2970,14 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
                       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);
 
        page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
-       context->sq_cur_sge_blk_addr = hr_qp->sq.max_gs > 2 ?
+       context->sq_cur_sge_blk_addr =
+                      ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
                                      ((u32)(mtts[hr_qp->sge.offset / page_size]
                                      >> PAGE_ADDR_SHIFT)) : 0;
        roce_set_field(context->byte_184_irrl_idx,
                       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
                       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
-                      hr_qp->sq.max_gs > 2 ?
+                      ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
                       (mtts[hr_qp->sge.offset / page_size] >>
                       (32 + PAGE_ADDR_SHIFT)) : 0);
        qpc_mask->sq_cur_sge_blk_addr = 0;
@@ -2766,6 +3112,14 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
        roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
                       V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);
 
+       if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
+               roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
+                              V2_QPC_BYTE_208_SR_MAX_S,
+                              fls(attr->max_rd_atomic - 1));
+               roce_set_field(qpc_mask->byte_208_irrl,
+                              V2_QPC_BYTE_208_SR_MAX_M,
+                              V2_QPC_BYTE_208_SR_MAX_S, 0);
+       }
        return 0;
 }
 
@@ -2794,7 +3148,8 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
         */
        memset(qpc_mask, 0xff, sizeof(*qpc_mask));
        if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
-               modify_qp_reset_to_init(ibqp, attr, context, qpc_mask);
+               modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
+                                       qpc_mask);
        } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
                modify_qp_init_to_init(ibqp, attr, attr_mask, context,
                                       qpc_mask);
@@ -2821,7 +3176,8 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
                   (cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) ||
                   (cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) ||
                   (cur_state == IB_QPS_SQD && new_state == IB_QPS_ERR) ||
-                  (cur_state == IB_QPS_SQE && new_state == IB_QPS_ERR)) {
+                  (cur_state == IB_QPS_SQE && new_state == IB_QPS_ERR) ||
+                  (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR)) {
                /* Nothing */
                ;
        } else {
@@ -2829,6 +3185,9 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
                goto out;
        }
 
+       if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
+               set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
+
        /* Every status migrate must change state */
        roce_set_field(context->byte_60_qpst_mapid, V2_QPC_BYTE_60_QP_ST_M,
                       V2_QPC_BYTE_60_QP_ST_S, new_state);
@@ -2845,6 +3204,9 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
 
        hr_qp->state = new_state;
 
+       if (attr_mask & IB_QP_ACCESS_FLAGS)
+               hr_qp->atomic_rd_en = attr->qp_access_flags;
+
        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                hr_qp->resp_depth = attr->max_dest_rd_atomic;
        if (attr_mask & IB_QP_PORT) {
@@ -2865,6 +3227,8 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
                hr_qp->sq.tail = 0;
                hr_qp->sq_next_wqe = 0;
                hr_qp->next_sge = 0;
+               if (hr_qp->rq.wqe_cnt)
+                       *hr_qp->rdb.db_record = 0;
        }
 
 out:
@@ -3091,11 +3455,22 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
        hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
 
        if (is_user) {
+               if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1))
+                       hns_roce_db_unmap_user(
+                               to_hr_ucontext(hr_qp->ibqp.uobject->context),
+                               &hr_qp->rdb);
                ib_umem_release(hr_qp->umem);
        } else {
                kfree(hr_qp->sq.wrid);
                kfree(hr_qp->rq.wrid);
                hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
+               if (hr_qp->rq.wqe_cnt)
+                       hns_roce_free_db(hr_dev, &hr_qp->rdb);
+       }
+
+       if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
+               kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
+               kfree(hr_qp->rq_inl_buf.wqe_list);
        }
 
        return 0;
@@ -3162,60 +3537,1212 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
        return ret;
 }
 
-static const struct hns_roce_hw hns_roce_hw_v2 = {
-       .cmq_init = hns_roce_v2_cmq_init,
-       .cmq_exit = hns_roce_v2_cmq_exit,
-       .hw_profile = hns_roce_v2_profile,
-       .post_mbox = hns_roce_v2_post_mbox,
-       .chk_mbox = hns_roce_v2_chk_mbox,
-       .set_gid = hns_roce_v2_set_gid,
-       .set_mac = hns_roce_v2_set_mac,
-       .write_mtpt = hns_roce_v2_write_mtpt,
-       .rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt,
-       .write_cqc = hns_roce_v2_write_cqc,
-       .set_hem = hns_roce_v2_set_hem,
-       .clear_hem = hns_roce_v2_clear_hem,
-       .modify_qp = hns_roce_v2_modify_qp,
-       .query_qp = hns_roce_v2_query_qp,
-       .destroy_qp = hns_roce_v2_destroy_qp,
-       .modify_cq = hns_roce_v2_modify_cq,
-       .post_send = hns_roce_v2_post_send,
-       .post_recv = hns_roce_v2_post_recv,
-       .req_notify_cq = hns_roce_v2_req_notify_cq,
-       .poll_cq = hns_roce_v2_poll_cq,
-};
-
-static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
-       {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
-       {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
-       {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
-       /* required last entry */
-       {0, }
-};
-
-static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
-                                 struct hnae3_handle *handle)
+static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
 {
-       const struct pci_device_id *id;
+       u32 doorbell[2];
 
-       id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev);
-       if (!id) {
-               dev_err(hr_dev->dev, "device is not compatible!\n");
-               return -ENXIO;
+       doorbell[0] = 0;
+       doorbell[1] = 0;
+
+       if (eq->type_flag == HNS_ROCE_AEQ) {
+               roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
+                              HNS_ROCE_V2_EQ_DB_CMD_S,
+                              eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
+                              HNS_ROCE_EQ_DB_CMD_AEQ :
+                              HNS_ROCE_EQ_DB_CMD_AEQ_ARMED);
+       } else {
+               roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_TAG_M,
+                              HNS_ROCE_V2_EQ_DB_TAG_S, eq->eqn);
+
+               roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
+                              HNS_ROCE_V2_EQ_DB_CMD_S,
+                              eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
+                              HNS_ROCE_EQ_DB_CMD_CEQ :
+                              HNS_ROCE_EQ_DB_CMD_CEQ_ARMED);
        }
 
-       hr_dev->hw = &hns_roce_hw_v2;
-       hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
-       hr_dev->odb_offset = hr_dev->sdb_offset;
+       roce_set_field(doorbell[1], HNS_ROCE_V2_EQ_DB_PARA_M,
+                      HNS_ROCE_V2_EQ_DB_PARA_S,
+                      (eq->cons_index & HNS_ROCE_V2_CONS_IDX_M));
 
-       /* Get info from NIC driver. */
-       hr_dev->reg_base = handle->rinfo.roce_io_base;
-       hr_dev->caps.num_ports = 1;
-       hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
-       hr_dev->iboe.phy_port[0] = 0;
+       hns_roce_write64_k(doorbell, eq->doorbell);
+}
+
/* Log the sub-type of a "local work queue catastrophic error" AEQE for
 * QP @qpn.  Diagnostic only; the caller (hns_roce_v2_qp_err_handle)
 * raises the actual QP event.
 */
static void hns_roce_v2_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
						  struct hns_roce_aeqe *aeqe,
						  u32 qpn)
{
	struct device *dev = hr_dev->dev;
	int sub_type;

	dev_warn(dev, "Local work queue catastrophic error.\n");
	sub_type = roce_get_field(aeqe->asyn, HNS_ROCE_V2_AEQE_SUB_TYPE_M,
				  HNS_ROCE_V2_AEQE_SUB_TYPE_S);
	switch (sub_type) {
	case HNS_ROCE_LWQCE_QPC_ERROR:
		dev_warn(dev, "QP %d, QPC error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_MTU_ERROR:
		dev_warn(dev, "QP %d, MTU error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
		dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
		dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
		dev_warn(dev, "QP %d, WQE shift error.\n", qpn);
		break;
	default:
		/* Unknown sub-type is logged at error severity. */
		dev_err(dev, "Unhandled sub_event type %d.\n", sub_type);
		break;
	}
}
+
/* Log the sub-type of a "local access violation work queue error" AEQE
 * for QP @qpn.  Diagnostic only; the caller raises the QP event.
 */
static void hns_roce_v2_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
					    struct hns_roce_aeqe *aeqe, u32 qpn)
{
	struct device *dev = hr_dev->dev;
	int sub_type;

	dev_warn(dev, "Local access violation work queue error.\n");
	sub_type = roce_get_field(aeqe->asyn, HNS_ROCE_V2_AEQE_SUB_TYPE_M,
				  HNS_ROCE_V2_AEQE_SUB_TYPE_S);
	switch (sub_type) {
	case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
		dev_warn(dev, "QP %d, R_key violation.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_LENGTH_ERROR:
		dev_warn(dev, "QP %d, length error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_VA_ERROR:
		dev_warn(dev, "QP %d, VA error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_PD_ERROR:
		/* NOTE(review): PD error alone is logged at error severity,
		 * unlike the other sub-types — looks deliberate, confirm.
		 */
		dev_err(dev, "QP %d, PD error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
		dev_warn(dev, "QP %d, rw acc error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
		dev_warn(dev, "QP %d, key state error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
		dev_warn(dev, "QP %d, MR operation error.\n", qpn);
		break;
	default:
		dev_err(dev, "Unhandled sub_event type %d.\n", sub_type);
		break;
	}
}
+
/* Handle a QP-related AEQE: decode the QP number from the entry, log
 * event-specific details, then forward the event to the core QP event
 * dispatcher (hns_roce_qp_event) for every event type.
 */
static void hns_roce_v2_qp_err_handle(struct hns_roce_dev *hr_dev,
				      struct hns_roce_aeqe *aeqe,
				      int event_type)
{
	struct device *dev = hr_dev->dev;
	u32 qpn;

	qpn = roce_get_field(aeqe->event.qp_event.qp,
			     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
			     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);

	switch (event_type) {
	case HNS_ROCE_EVENT_TYPE_COMM_EST:
		dev_warn(dev, "Communication established.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
		dev_warn(dev, "Send queue drained.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
		hns_roce_v2_wq_catas_err_handle(hr_dev, aeqe, qpn);
		break;
	case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
		dev_warn(dev, "Invalid request local work queue error.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
		hns_roce_v2_local_wq_access_err_handle(hr_dev, aeqe, qpn);
		break;
	default:
		/* No extra logging, but still dispatched below. */
		break;
	}

	hns_roce_qp_event(hr_dev, qpn, event_type);
}
+
/* Handle a CQ-related AEQE: decode the CQ number, log access-error /
 * overflow events, then forward to the core CQ event dispatcher.
 */
static void hns_roce_v2_cq_err_handle(struct hns_roce_dev *hr_dev,
				      struct hns_roce_aeqe *aeqe,
				      int event_type)
{
	struct device *dev = hr_dev->dev;
	u32 cqn;

	cqn = roce_get_field(aeqe->event.cq_event.cq,
			     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
			     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);

	switch (event_type) {
	case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
		dev_warn(dev, "CQ 0x%x access err.\n", cqn);
		break;
	case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
		dev_warn(dev, "CQ 0x%x overflow\n", cqn);
		break;
	default:
		break;
	}

	hns_roce_cq_event(hr_dev, cqn, event_type);
}
+
+static struct hns_roce_aeqe *get_aeqe_v2(struct hns_roce_eq *eq, u32 entry)
+{
+       u32 buf_chk_sz;
+       unsigned long off;
+
+       buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
+       off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
+
+       return (struct hns_roce_aeqe *)((char *)(eq->buf_list->buf) +
+               off % buf_chk_sz);
+}
+
+static struct hns_roce_aeqe *mhop_get_aeqe(struct hns_roce_eq *eq, u32 entry)
+{
+       u32 buf_chk_sz;
+       unsigned long off;
+
+       buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
+
+       off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
+
+       if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
+               return (struct hns_roce_aeqe *)((u8 *)(eq->bt_l0) +
+                       off % buf_chk_sz);
+       else
+               return (struct hns_roce_aeqe *)((u8 *)
+                       (eq->buf[off / buf_chk_sz]) + off % buf_chk_sz);
+}
+
+static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
+{
+       struct hns_roce_aeqe *aeqe;
+
+       if (!eq->hop_num)
+               aeqe = get_aeqe_v2(eq, eq->cons_index);
+       else
+               aeqe = mhop_get_aeqe(eq, eq->cons_index);
+
+       return (roce_get_bit(aeqe->asyn, HNS_ROCE_V2_AEQ_AEQE_OWNER_S) ^
+               !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
+}
+
+static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
+                              struct hns_roce_eq *eq)
+{
+       struct device *dev = hr_dev->dev;
+       struct hns_roce_aeqe *aeqe;
+       int aeqe_found = 0;
+       int event_type;
+
+       while ((aeqe = next_aeqe_sw_v2(eq))) {
+
+               /* Make sure we read AEQ entry after we have checked the
+                * ownership bit
+                */
+               dma_rmb();
+
+               event_type = roce_get_field(aeqe->asyn,
+                                           HNS_ROCE_V2_AEQE_EVENT_TYPE_M,
+                                           HNS_ROCE_V2_AEQE_EVENT_TYPE_S);
+
+               switch (event_type) {
+               case HNS_ROCE_EVENT_TYPE_PATH_MIG:
+                       dev_warn(dev, "Path migrated succeeded.\n");
+                       break;
+               case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
+                       dev_warn(dev, "Path migration failed.\n");
+                       break;
+               case HNS_ROCE_EVENT_TYPE_COMM_EST:
+               case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
+               case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+               case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+               case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+                       hns_roce_v2_qp_err_handle(hr_dev, aeqe, event_type);
+                       break;
+               case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
+               case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
+               case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
+                       dev_warn(dev, "SRQ not support.\n");
+                       break;
+               case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
+               case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
+                       hns_roce_v2_cq_err_handle(hr_dev, aeqe, event_type);
+                       break;
+               case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
+                       dev_warn(dev, "DB overflow.\n");
+                       break;
+               case HNS_ROCE_EVENT_TYPE_MB:
+                       hns_roce_cmd_event(hr_dev,
+                                       le16_to_cpu(aeqe->event.cmd.token),
+                                       aeqe->event.cmd.status,
+                                       le64_to_cpu(aeqe->event.cmd.out_param));
+                       break;
+               case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
+                       dev_warn(dev, "CEQ overflow.\n");
+                       break;
+               case HNS_ROCE_EVENT_TYPE_FLR:
+                       dev_warn(dev, "Function level reset.\n");
+                       break;
+               default:
+                       dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
+                               event_type, eq->eqn, eq->cons_index);
+                       break;
+               };
+
+               ++eq->cons_index;
+               aeqe_found = 1;
+
+               if (eq->cons_index > (2 * eq->entries - 1)) {
+                       dev_warn(dev, "cons_index overflow, set back to 0.\n");
+                       eq->cons_index = 0;
+               }
+       }
+
+       set_eq_cons_index_v2(eq);
+       return aeqe_found;
+}
+
+static struct hns_roce_ceqe *get_ceqe_v2(struct hns_roce_eq *eq, u32 entry)
+{
+       u32 buf_chk_sz;
+       unsigned long off;
+
+       buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
+       off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
+
+       return (struct hns_roce_ceqe *)((char *)(eq->buf_list->buf) +
+               off % buf_chk_sz);
+}
+
+static struct hns_roce_ceqe *mhop_get_ceqe(struct hns_roce_eq *eq, u32 entry)
+{
+       u32 buf_chk_sz;
+       unsigned long off;
+
+       buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
+
+       off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
+
+       if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
+               return (struct hns_roce_ceqe *)((u8 *)(eq->bt_l0) +
+                       off % buf_chk_sz);
+       else
+               return (struct hns_roce_ceqe *)((u8 *)(eq->buf[off /
+                       buf_chk_sz]) + off % buf_chk_sz);
+}
+
+static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
+{
+       struct hns_roce_ceqe *ceqe;
+
+       if (!eq->hop_num)
+               ceqe = get_ceqe_v2(eq, eq->cons_index);
+       else
+               ceqe = mhop_get_ceqe(eq, eq->cons_index);
+
+       return (!!(roce_get_bit(ceqe->comp, HNS_ROCE_V2_CEQ_CEQE_OWNER_S))) ^
+               (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
+}
+
+static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
+                              struct hns_roce_eq *eq)
+{
+       struct device *dev = hr_dev->dev;
+       struct hns_roce_ceqe *ceqe;
+       int ceqe_found = 0;
+       u32 cqn;
+
+       while ((ceqe = next_ceqe_sw_v2(eq))) {
+
+               /* Make sure we read CEQ entry after we have checked the
+                * ownership bit
+                */
+               dma_rmb();
+
+               cqn = roce_get_field(ceqe->comp,
+                                    HNS_ROCE_V2_CEQE_COMP_CQN_M,
+                                    HNS_ROCE_V2_CEQE_COMP_CQN_S);
+
+               hns_roce_cq_completion(hr_dev, cqn);
+
+               ++eq->cons_index;
+               ceqe_found = 1;
+
+               if (eq->cons_index > (2 * eq->entries - 1)) {
+                       dev_warn(dev, "cons_index overflow, set back to 0.\n");
+                       eq->cons_index = 0;
+               }
+       }
+
+       set_eq_cons_index_v2(eq);
+
+       return ceqe_found;
+}
+
+static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
+{
+       struct hns_roce_eq *eq = eq_ptr;
+       struct hns_roce_dev *hr_dev = eq->hr_dev;
+       int int_work = 0;
+
+       if (eq->type_flag == HNS_ROCE_CEQ)
+               /* Completion event interrupt */
+               int_work = hns_roce_v2_ceq_int(hr_dev, eq);
+       else
+               /* Asychronous event interrupt */
+               int_work = hns_roce_v2_aeq_int(hr_dev, eq);
+
+       return IRQ_RETVAL(int_work);
+}
+
+static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
+{
+       struct hns_roce_dev *hr_dev = dev_id;
+       struct device *dev = hr_dev->dev;
+       int int_work = 0;
+       u32 int_st;
+       u32 int_en;
+
+       /* Abnormal interrupt */
+       int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);
+       int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);
+
+       if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
+               dev_err(dev, "AEQ overflow!\n");
+
+               roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S, 1);
+               roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
+
+               roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
+               roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
+
+               int_work = 1;
+       } else if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S)) {
+               dev_err(dev, "BUS ERR!\n");
+
+               roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S, 1);
+               roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
+
+               roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
+               roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
+
+               int_work = 1;
+       } else if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S)) {
+               dev_err(dev, "OTHER ERR!\n");
+
+               roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S, 1);
+               roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
+
+               roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
+               roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
+
+               int_work = 1;
+       } else
+               dev_err(dev, "There is no abnormal irq found!\n");
+
+       return IRQ_RETVAL(int_work);
+}
+
+static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
+                                       int eq_num, int enable_flag)
+{
+       int i;
+
+       if (enable_flag == EQ_ENABLE) {
+               for (i = 0; i < eq_num; i++)
+                       roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
+                                  i * EQ_REG_OFFSET,
+                                  HNS_ROCE_V2_VF_EVENT_INT_EN_M);
+
+               roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
+                          HNS_ROCE_V2_VF_ABN_INT_EN_M);
+               roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
+                          HNS_ROCE_V2_VF_ABN_INT_CFG_M);
+       } else {
+               for (i = 0; i < eq_num; i++)
+                       roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
+                                  i * EQ_REG_OFFSET,
+                                  HNS_ROCE_V2_VF_EVENT_INT_EN_M & 0x0);
+
+               roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
+                          HNS_ROCE_V2_VF_ABN_INT_EN_M & 0x0);
+               roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
+                          HNS_ROCE_V2_VF_ABN_INT_CFG_M & 0x0);
+       }
+}
+
+static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
+{
+       struct device *dev = hr_dev->dev;
+       int ret;
+
+       if (eqn < hr_dev->caps.num_comp_vectors)
+               ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
+                                       0, HNS_ROCE_CMD_DESTROY_CEQC,
+                                       HNS_ROCE_CMD_TIMEOUT_MSECS);
+       else
+               ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
+                                       0, HNS_ROCE_CMD_DESTROY_AEQC,
+                                       HNS_ROCE_CMD_TIMEOUT_MSECS);
+       if (ret)
+               dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn);
+}
+
/* Free all DMA memory behind a multi-hop EQ: the EQE buffer chunks, the
 * L1 base-address tables (hop_num == 2 only) and the L0 table, then the
 * host-side bookkeeping arrays.  Must mirror the allocation layout of
 * hns_roce_mhop_alloc_eq().
 */
static void hns_roce_mhop_free_eq(struct hns_roce_dev *hr_dev,
				  struct hns_roce_eq *eq)
{
	struct device *dev = hr_dev->dev;
	u64 idx;
	u64 size;
	u32 buf_chk_sz;
	u32 bt_chk_sz;
	u32 mhop_num;
	int eqe_alloc;
	int ba_num;
	int i = 0;
	int j = 0;

	mhop_num = hr_dev->caps.eqe_hop_num;
	buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
	bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
	/* Number of buffer chunks the EQE area was split into. */
	ba_num = (PAGE_ALIGN(eq->entries * eq->eqe_size) + buf_chk_sz - 1) /
		 buf_chk_sz;

	/* hop_num = 0: EQEs were allocated directly as one region. */
	if (mhop_num == HNS_ROCE_HOP_NUM_0) {
		dma_free_coherent(dev, (unsigned int)(eq->entries *
				  eq->eqe_size), eq->bt_l0, eq->l0_dma);
		return;
	}

	/* hop_num = 1 or hop = 2: free the L0 BT first. */
	dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
	if (mhop_num == 1) {
		for (i = 0; i < eq->l0_last_num; i++) {
			/* The last chunk may be short: only the remainder of
			 * the EQE area was allocated for it.
			 */
			if (i == eq->l0_last_num - 1) {
				eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
				size = (eq->entries - eqe_alloc) * eq->eqe_size;
				dma_free_coherent(dev, size, eq->buf[i],
						  eq->buf_dma[i]);
				break;
			}
			dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
					  eq->buf_dma[i]);
		}
	} else if (mhop_num == 2) {
		for (i = 0; i < eq->l0_last_num; i++) {
			dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
					  eq->l1_dma[i]);

			/* Each L1 BT covers bt_chk_sz / 8 buffer chunks. */
			for (j = 0; j < bt_chk_sz / 8; j++) {
				idx = i * (bt_chk_sz / 8) + j;
				/* Last chunk of the last L1 table may be
				 * short, as in the 1-hop case.
				 */
				if ((i == eq->l0_last_num - 1)
				     && j == eq->l1_last_num - 1) {
					eqe_alloc = (buf_chk_sz / eq->eqe_size)
						    * idx;
					size = (eq->entries - eqe_alloc)
						* eq->eqe_size;
					dma_free_coherent(dev, size,
							  eq->buf[idx],
							  eq->buf_dma[idx]);
					break;
				}
				dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
						  eq->buf_dma[idx]);
			}
		}
	}
	/* Release the bookkeeping arrays and poison the pointers so a
	 * double free is detectable.
	 */
	kfree(eq->buf_dma);
	kfree(eq->buf);
	kfree(eq->l1_dma);
	kfree(eq->bt_l1);
	eq->buf_dma = NULL;
	eq->buf = NULL;
	eq->l1_dma = NULL;
	eq->bt_l1 = NULL;
}
+
+static void hns_roce_v2_free_eq(struct hns_roce_dev *hr_dev,
+                               struct hns_roce_eq *eq)
+{
+       u32 buf_chk_sz;
+
+       buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
+
+       if (hr_dev->caps.eqe_hop_num) {
+               hns_roce_mhop_free_eq(hr_dev, eq);
+               return;
+       }
+
+       if (eq->buf_list)
+               dma_free_coherent(hr_dev->dev, buf_chk_sz,
+                                 eq->buf_list->buf, eq->buf_list->map);
+}
+
/* Fill the EQ context (@mb_buf, passed to hardware via mailbox) from the
 * software EQ state, initialising the software fields (doorbell address,
 * consumer index, arm state, page sizes) along the way.  The base
 * address fields are split across several context words at the bit
 * offsets noted below.
 */
static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
				struct hns_roce_eq *eq,
				void *mb_buf)
{
	struct hns_roce_eq_context *eqc;

	eqc = mb_buf;
	memset(eqc, 0, sizeof(struct hns_roce_eq_context));

	/* init eqc */
	eq->doorbell = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
	eq->hop_num = hr_dev->caps.eqe_hop_num;
	eq->cons_index = 0;
	eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
	eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
	eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
	eq->eqe_ba_pg_sz = hr_dev->caps.eqe_ba_pg_sz;
	eq->eqe_buf_pg_sz = hr_dev->caps.eqe_buf_pg_sz;
	eq->shift = ilog2((unsigned int)eq->entries);

	/* Base address: flat EQs use the single buffer, multi-hop EQs the
	 * L0 table.
	 */
	if (!eq->hop_num)
		eq->eqe_ba = eq->buf_list->map;
	else
		eq->eqe_ba = eq->l0_dma;

	/* set eqc state */
	roce_set_field(eqc->byte_4,
		       HNS_ROCE_EQC_EQ_ST_M,
		       HNS_ROCE_EQC_EQ_ST_S,
		       HNS_ROCE_V2_EQ_STATE_VALID);

	/* set eqe hop num */
	roce_set_field(eqc->byte_4,
		       HNS_ROCE_EQC_HOP_NUM_M,
		       HNS_ROCE_EQC_HOP_NUM_S, eq->hop_num);

	/* set eqc over_ignore */
	roce_set_field(eqc->byte_4,
		       HNS_ROCE_EQC_OVER_IGNORE_M,
		       HNS_ROCE_EQC_OVER_IGNORE_S, eq->over_ignore);

	/* set eqc coalesce */
	roce_set_field(eqc->byte_4,
		       HNS_ROCE_EQC_COALESCE_M,
		       HNS_ROCE_EQC_COALESCE_S, eq->coalesce);

	/* set eqc arm_state */
	roce_set_field(eqc->byte_4,
		       HNS_ROCE_EQC_ARM_ST_M,
		       HNS_ROCE_EQC_ARM_ST_S, eq->arm_st);

	/* set eqn */
	roce_set_field(eqc->byte_4,
		       HNS_ROCE_EQC_EQN_M,
		       HNS_ROCE_EQC_EQN_S, eq->eqn);

	/* set eqe_cnt */
	roce_set_field(eqc->byte_4,
		       HNS_ROCE_EQC_EQE_CNT_M,
		       HNS_ROCE_EQC_EQE_CNT_S,
		       HNS_ROCE_EQ_INIT_EQE_CNT);

	/* set eqe_ba_pg_sz */
	roce_set_field(eqc->byte_8,
		       HNS_ROCE_EQC_BA_PG_SZ_M,
		       HNS_ROCE_EQC_BA_PG_SZ_S, eq->eqe_ba_pg_sz);

	/* set eqe_buf_pg_sz */
	roce_set_field(eqc->byte_8,
		       HNS_ROCE_EQC_BUF_PG_SZ_M,
		       HNS_ROCE_EQC_BUF_PG_SZ_S, eq->eqe_buf_pg_sz);

	/* set eq_producer_idx */
	roce_set_field(eqc->byte_8,
		       HNS_ROCE_EQC_PROD_INDX_M,
		       HNS_ROCE_EQC_PROD_INDX_S,
		       HNS_ROCE_EQ_INIT_PROD_IDX);

	/* set eq_max_cnt */
	roce_set_field(eqc->byte_12,
		       HNS_ROCE_EQC_MAX_CNT_M,
		       HNS_ROCE_EQC_MAX_CNT_S, eq->eq_max_cnt);

	/* set eq_period */
	roce_set_field(eqc->byte_12,
		       HNS_ROCE_EQC_PERIOD_M,
		       HNS_ROCE_EQC_PERIOD_S, eq->eq_period);

	/* set eqe_report_timer */
	roce_set_field(eqc->eqe_report_timer,
		       HNS_ROCE_EQC_REPORT_TIMER_M,
		       HNS_ROCE_EQC_REPORT_TIMER_S,
		       HNS_ROCE_EQ_INIT_REPORT_TIMER);

	/* set eqe_ba [34:3] */
	roce_set_field(eqc->eqe_ba0,
		       HNS_ROCE_EQC_EQE_BA_L_M,
		       HNS_ROCE_EQC_EQE_BA_L_S, eq->eqe_ba >> 3);

	/* set eqe_ba [63:35] */
	roce_set_field(eqc->eqe_ba1,
		       HNS_ROCE_EQC_EQE_BA_H_M,
		       HNS_ROCE_EQC_EQE_BA_H_S, eq->eqe_ba >> 35);

	/* set eq shift */
	roce_set_field(eqc->byte_28,
		       HNS_ROCE_EQC_SHIFT_M,
		       HNS_ROCE_EQC_SHIFT_S, eq->shift);

	/* set eq MSI_IDX */
	roce_set_field(eqc->byte_28,
		       HNS_ROCE_EQC_MSI_INDX_M,
		       HNS_ROCE_EQC_MSI_INDX_S,
		       HNS_ROCE_EQ_INIT_MSI_IDX);

	/* set cur_eqe_ba [27:12] */
	roce_set_field(eqc->byte_28,
		       HNS_ROCE_EQC_CUR_EQE_BA_L_M,
		       HNS_ROCE_EQC_CUR_EQE_BA_L_S, eq->cur_eqe_ba >> 12);

	/* set cur_eqe_ba [59:28] */
	roce_set_field(eqc->byte_32,
		       HNS_ROCE_EQC_CUR_EQE_BA_M_M,
		       HNS_ROCE_EQC_CUR_EQE_BA_M_S, eq->cur_eqe_ba >> 28);

	/* set cur_eqe_ba [63:60] */
	roce_set_field(eqc->byte_36,
		       HNS_ROCE_EQC_CUR_EQE_BA_H_M,
		       HNS_ROCE_EQC_CUR_EQE_BA_H_S, eq->cur_eqe_ba >> 60);

	/* set eq consumer idx */
	roce_set_field(eqc->byte_36,
		       HNS_ROCE_EQC_CONS_INDX_M,
		       HNS_ROCE_EQC_CONS_INDX_S,
		       HNS_ROCE_EQ_INIT_CONS_IDX);

	/* set nxt_eqe_ba [43:12] */
	roce_set_field(eqc->nxt_eqe_ba0,
		       HNS_ROCE_EQC_NXT_EQE_BA_L_M,
		       HNS_ROCE_EQC_NXT_EQE_BA_L_S, eq->nxt_eqe_ba >> 12);

	/* set nxt_eqe_ba [63:44] */
	roce_set_field(eqc->nxt_eqe_ba1,
		       HNS_ROCE_EQC_NXT_EQE_BA_H_M,
		       HNS_ROCE_EQC_NXT_EQE_BA_H_S, eq->nxt_eqe_ba >> 44);
}
+
+static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
+                                 struct hns_roce_eq *eq)
+{
+       struct device *dev = hr_dev->dev;
+       int eq_alloc_done = 0;
+       int eq_buf_cnt = 0;
+       int eqe_alloc;
+       u32 buf_chk_sz;
+       u32 bt_chk_sz;
+       u32 mhop_num;
+       u64 size;
+       u64 idx;
+       int ba_num;
+       int bt_num;
+       int record_i;
+       int record_j;
+       int i = 0;
+       int j = 0;
+
+       mhop_num = hr_dev->caps.eqe_hop_num;
+       buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
+       bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
+
+       ba_num = (PAGE_ALIGN(eq->entries * eq->eqe_size) + buf_chk_sz - 1)
+                 / buf_chk_sz;
+       bt_num = (ba_num + bt_chk_sz / 8 - 1) / (bt_chk_sz / 8);
+
+       /* hop_num = 0 */
+       if (mhop_num == HNS_ROCE_HOP_NUM_0) {
+               if (eq->entries > buf_chk_sz / eq->eqe_size) {
+                       dev_err(dev, "eq entries %d is larger than buf_pg_sz!",
+                               eq->entries);
+                       return -EINVAL;
+               }
+               eq->bt_l0 = dma_alloc_coherent(dev, eq->entries * eq->eqe_size,
+                                              &(eq->l0_dma), GFP_KERNEL);
+               if (!eq->bt_l0)
+                       return -ENOMEM;
+
+               eq->cur_eqe_ba = eq->l0_dma;
+               eq->nxt_eqe_ba = 0;
+
+               memset(eq->bt_l0, 0, eq->entries * eq->eqe_size);
+
+               return 0;
+       }
+
+       eq->buf_dma = kcalloc(ba_num, sizeof(*eq->buf_dma), GFP_KERNEL);
+       if (!eq->buf_dma)
+               return -ENOMEM;
+       eq->buf = kcalloc(ba_num, sizeof(*eq->buf), GFP_KERNEL);
+       if (!eq->buf)
+               goto err_kcalloc_buf;
+
+       if (mhop_num == 2) {
+               eq->l1_dma = kcalloc(bt_num, sizeof(*eq->l1_dma), GFP_KERNEL);
+               if (!eq->l1_dma)
+                       goto err_kcalloc_l1_dma;
+
+               eq->bt_l1 = kcalloc(bt_num, sizeof(*eq->bt_l1), GFP_KERNEL);
+               if (!eq->bt_l1)
+                       goto err_kcalloc_bt_l1;
+       }
+
+       /* alloc L0 BT */
+       eq->bt_l0 = dma_alloc_coherent(dev, bt_chk_sz, &eq->l0_dma, GFP_KERNEL);
+       if (!eq->bt_l0)
+               goto err_dma_alloc_l0;
+
+       if (mhop_num == 1) {
+               if (ba_num > (bt_chk_sz / 8))
+                       dev_err(dev, "ba_num %d is too large for 1 hop\n",
+                               ba_num);
+
+               /* alloc buf */
+               for (i = 0; i < bt_chk_sz / 8; i++) {
+                       if (eq_buf_cnt + 1 < ba_num) {
+                               size = buf_chk_sz;
+                       } else {
+                               eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
+                               size = (eq->entries - eqe_alloc) * eq->eqe_size;
+                       }
+                       eq->buf[i] = dma_alloc_coherent(dev, size,
+                                                       &(eq->buf_dma[i]),
+                                                       GFP_KERNEL);
+                       if (!eq->buf[i])
+                               goto err_dma_alloc_buf;
+
+                       memset(eq->buf[i], 0, size);
+                       *(eq->bt_l0 + i) = eq->buf_dma[i];
+
+                       eq_buf_cnt++;
+                       if (eq_buf_cnt >= ba_num)
+                               break;
+               }
+               eq->cur_eqe_ba = eq->buf_dma[0];
+               eq->nxt_eqe_ba = eq->buf_dma[1];
+
+       } else if (mhop_num == 2) {
+               /* alloc L1 BT and buf */
+               for (i = 0; i < bt_chk_sz / 8; i++) {
+                       eq->bt_l1[i] = dma_alloc_coherent(dev, bt_chk_sz,
+                                                         &(eq->l1_dma[i]),
+                                                         GFP_KERNEL);
+                       if (!eq->bt_l1[i])
+                               goto err_dma_alloc_l1;
+                       *(eq->bt_l0 + i) = eq->l1_dma[i];
+
+                       for (j = 0; j < bt_chk_sz / 8; j++) {
+                               idx = i * bt_chk_sz / 8 + j;
+                               if (eq_buf_cnt + 1 < ba_num) {
+                                       size = buf_chk_sz;
+                               } else {
+                                       eqe_alloc = (buf_chk_sz / eq->eqe_size)
+                                                   * idx;
+                                       size = (eq->entries - eqe_alloc)
+                                               * eq->eqe_size;
+                               }
+                               eq->buf[idx] = dma_alloc_coherent(dev, size,
+                                                           &(eq->buf_dma[idx]),
+                                                           GFP_KERNEL);
+                               if (!eq->buf[idx])
+                                       goto err_dma_alloc_buf;
+
+                               memset(eq->buf[idx], 0, size);
+                               *(eq->bt_l1[i] + j) = eq->buf_dma[idx];
+
+                               eq_buf_cnt++;
+                               if (eq_buf_cnt >= ba_num) {
+                                       eq_alloc_done = 1;
+                                       break;
+                               }
+                       }
+
+                       if (eq_alloc_done)
+                               break;
+               }
+               eq->cur_eqe_ba = eq->buf_dma[0];
+               eq->nxt_eqe_ba = eq->buf_dma[1];
+       }
+
+       eq->l0_last_num = i + 1;
+       if (mhop_num == 2)
+               eq->l1_last_num = j + 1;
+
+       return 0;
+
+err_dma_alloc_l1:
+       dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
+       eq->bt_l0 = NULL;
+       eq->l0_dma = 0;
+       for (i -= 1; i >= 0; i--) {
+               dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
+                                 eq->l1_dma[i]);
+
+               for (j = 0; j < bt_chk_sz / 8; j++) {
+                       idx = i * bt_chk_sz / 8 + j;
+                       dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
+                                         eq->buf_dma[idx]);
+               }
+       }
+       goto err_dma_alloc_l0;
+
+err_dma_alloc_buf:
+       dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
+       eq->bt_l0 = NULL;
+       eq->l0_dma = 0;
+
+       if (mhop_num == 1)
+               for (i -= 1; i >= 0; i--)
+                       dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
+                                         eq->buf_dma[i]);
+       else if (mhop_num == 2) {
+               record_i = i;
+               record_j = j;
+               for (; i >= 0; i--) {
+                       dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
+                                         eq->l1_dma[i]);
+
+                       for (j = 0; j < bt_chk_sz / 8; j++) {
+                               if (i == record_i && j >= record_j)
+                                       break;
+
+                               idx = i * bt_chk_sz / 8 + j;
+                               dma_free_coherent(dev, buf_chk_sz,
+                                                 eq->buf[idx],
+                                                 eq->buf_dma[idx]);
+                       }
+               }
+       }
+
+err_dma_alloc_l0:
+       kfree(eq->bt_l1);
+       eq->bt_l1 = NULL;
+
+err_kcalloc_bt_l1:
+       kfree(eq->l1_dma);
+       eq->l1_dma = NULL;
+
+err_kcalloc_l1_dma:
+       kfree(eq->buf);
+       eq->buf = NULL;
+
+err_kcalloc_buf:
+       kfree(eq->buf_dma);
+       eq->buf_dma = NULL;
+
+       return -ENOMEM;
+}
+
+/*
+ * hns_roce_v2_create_eq - allocate backing memory for one event queue and
+ * program its context into hardware.
+ *
+ * @hr_dev: RoCE device
+ * @eq:     event queue to set up (eqn, entries, eqe_size already filled in)
+ * @eq_cmd: mailbox opcode, HNS_ROCE_CMD_CREATE_CEQC or HNS_ROCE_CMD_CREATE_AEQC
+ *
+ * When caps.eqe_hop_num is zero the EQ buffer is a single physically
+ * contiguous DMA-coherent chunk (buf_chk_sz bytes); otherwise the multi-hop
+ * page-table layout is built by hns_roce_mhop_alloc_eq().  The EQ context is
+ * then written into the mailbox buffer and posted to firmware.
+ *
+ * Returns 0 on success or a negative errno; all memory allocated here is
+ * released on every failure path.
+ */
+static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
+                                struct hns_roce_eq *eq,
+                                unsigned int eq_cmd)
+{
+       struct device *dev = hr_dev->dev;
+       struct hns_roce_cmd_mailbox *mailbox;
+       u32 buf_chk_sz = 0;
+       int ret;
+
+       /* Allocate mailbox memory */
+       mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       if (!hr_dev->caps.eqe_hop_num) {
+               /* 0-hop: one contiguous buffer holds every EQE. */
+               buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
+
+               eq->buf_list = kzalloc(sizeof(struct hns_roce_buf_list),
+                                      GFP_KERNEL);
+               if (!eq->buf_list) {
+                       ret = -ENOMEM;
+                       goto free_cmd_mbox;
+               }
+
+               eq->buf_list->buf = dma_alloc_coherent(dev, buf_chk_sz,
+                                                      &(eq->buf_list->map),
+                                                      GFP_KERNEL);
+               if (!eq->buf_list->buf) {
+                       ret = -ENOMEM;
+                       goto err_alloc_buf;
+               }
+
+               /* Hardware expects the ring zeroed before it is armed. */
+               memset(eq->buf_list->buf, 0, buf_chk_sz);
+       } else {
+               ret = hns_roce_mhop_alloc_eq(hr_dev, eq);
+               if (ret) {
+                       /*
+                        * NOTE(review): this discards the allocator's
+                        * specific error code and always reports -ENOMEM.
+                        */
+                       ret = -ENOMEM;
+                       goto free_cmd_mbox;
+               }
+       }
+
+       /* Fill the EQ context into the mailbox buffer and post it. */
+       hns_roce_config_eqc(hr_dev, eq, mailbox->buf);
+
+       ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0,
+                               eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS);
+       if (ret) {
+               dev_err(dev, "[mailbox cmd] create eqc failed.\n");
+               goto err_cmd_mbox;
+       }
+
+       hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+       return 0;
+
+err_cmd_mbox:
+       if (!hr_dev->caps.eqe_hop_num)
+               dma_free_coherent(dev, buf_chk_sz, eq->buf_list->buf,
+                                 eq->buf_list->map);
+       else {
+               /*
+                * Multi-hop teardown frees everything itself; skip the
+                * kfree(eq->buf_list) below, which was never allocated
+                * on this path.
+                */
+               hns_roce_mhop_free_eq(hr_dev, eq);
+               goto free_cmd_mbox;
+       }
+
+err_alloc_buf:
+       kfree(eq->buf_list);
+
+free_cmd_mbox:
+       hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+       return ret;
+}
+
+/*
+ * hns_roce_v2_init_eq_table - create all event queues and hook up their IRQs.
+ *
+ * Vector layout (from caps): num_other_vectors "abnormal" interrupts come
+ * first, then the AEQ vector(s), then the completion (CEQ) vectors.  The
+ * eq_table->eq[] array, by contrast, holds the CEQs at indices
+ * [0, comp_num) and the AEQs after them — the index arithmetic below
+ * translates between the two layouts.
+ *
+ * Returns 0 on success or a negative errno; on failure everything created
+ * so far (irq names, EQs, requested irqs) is unwound in reverse order.
+ */
+static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
+{
+       struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
+       struct device *dev = hr_dev->dev;
+       struct hns_roce_eq *eq;
+       unsigned int eq_cmd;
+       int irq_num;
+       int eq_num;
+       int other_num;
+       int comp_num;
+       int aeq_num;
+       int i, j, k;
+       int ret;
+
+       other_num = hr_dev->caps.num_other_vectors;
+       comp_num = hr_dev->caps.num_comp_vectors;
+       aeq_num = hr_dev->caps.num_aeq_vectors;
+
+       eq_num = comp_num + aeq_num;
+       irq_num = eq_num + other_num;
+
+       eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
+       if (!eq_table->eq)
+               return -ENOMEM;
+
+       /* One name buffer per irq; filled in after the EQs are created. */
+       for (i = 0; i < irq_num; i++) {
+               hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN,
+                                              GFP_KERNEL);
+               if (!hr_dev->irq_names[i]) {
+                       ret = -ENOMEM;
+                       goto err_failed_kzalloc;
+               }
+       }
+
+       /* create eq */
+       for (j = 0; j < eq_num; j++) {
+               eq = &eq_table->eq[j];
+               eq->hr_dev = hr_dev;
+               eq->eqn = j;
+               if (j < comp_num) {
+                       /* CEQ */
+                       eq_cmd = HNS_ROCE_CMD_CREATE_CEQC;
+                       eq->type_flag = HNS_ROCE_CEQ;
+                       eq->entries = hr_dev->caps.ceqe_depth;
+                       eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
+                       /* CEQ vectors sit after the abnormal + AEQ vectors. */
+                       eq->irq = hr_dev->irq[j + other_num + aeq_num];
+                       eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM;
+                       eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL;
+               } else {
+                       /* AEQ */
+                       eq_cmd = HNS_ROCE_CMD_CREATE_AEQC;
+                       eq->type_flag = HNS_ROCE_AEQ;
+                       eq->entries = hr_dev->caps.aeqe_depth;
+                       eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
+                       /* AEQ vectors sit right after the abnormal ones. */
+                       eq->irq = hr_dev->irq[j - comp_num + other_num];
+                       eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM;
+                       eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL;
+               }
+
+               ret = hns_roce_v2_create_eq(hr_dev, eq, eq_cmd);
+               if (ret) {
+                       dev_err(dev, "eq create failed.\n");
+                       goto err_create_eq_fail;
+               }
+       }
+
+       /* enable irq */
+       hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE);
+
+       /* irq contains: abnormal + AEQ + CEQ*/
+       for (k = 0; k < irq_num; k++)
+               if (k < other_num)
+                       snprintf((char *)hr_dev->irq_names[k],
+                                HNS_ROCE_INT_NAME_LEN, "hns-abn-%d", k);
+               else if (k < (other_num + aeq_num))
+                       snprintf((char *)hr_dev->irq_names[k],
+                                HNS_ROCE_INT_NAME_LEN, "hns-aeq-%d",
+                                k - other_num);
+               else
+                       snprintf((char *)hr_dev->irq_names[k],
+                                HNS_ROCE_INT_NAME_LEN, "hns-ceq-%d",
+                                k - other_num - aeq_num);
+
+       /*
+        * Request in eq_table order (CEQs first, then AEQs).  The name
+        * offsets (+aeq_num / -comp_num) remap back into the abn/aeq/ceq
+        * naming order built above, so each EQ still gets the right name.
+        */
+       for (k = 0; k < irq_num; k++) {
+               if (k < other_num)
+                       ret = request_irq(hr_dev->irq[k],
+                                         hns_roce_v2_msix_interrupt_abn,
+                                         0, hr_dev->irq_names[k], hr_dev);
+
+               else if (k < (other_num + comp_num))
+                       ret = request_irq(eq_table->eq[k - other_num].irq,
+                                         hns_roce_v2_msix_interrupt_eq,
+                                         0, hr_dev->irq_names[k + aeq_num],
+                                         &eq_table->eq[k - other_num]);
+               else
+                       ret = request_irq(eq_table->eq[k - other_num].irq,
+                                         hns_roce_v2_msix_interrupt_eq,
+                                         0, hr_dev->irq_names[k - comp_num],
+                                         &eq_table->eq[k - other_num]);
+               if (ret) {
+                       dev_err(dev, "Request irq error!\n");
+                       goto err_request_irq_fail;
+               }
+       }
+
+       return 0;
+
+err_request_irq_fail:
+       /* Free only the irqs requested before the failing index k. */
+       for (k -= 1; k >= 0; k--)
+               if (k < other_num)
+                       free_irq(hr_dev->irq[k], hr_dev);
+               else
+                       free_irq(eq_table->eq[k - other_num].irq,
+                                &eq_table->eq[k - other_num]);
+
+err_create_eq_fail:
+       for (j -= 1; j >= 0; j--)
+               hns_roce_v2_free_eq(hr_dev, &eq_table->eq[j]);
+
+err_failed_kzalloc:
+       for (i -= 1; i >= 0; i--)
+               kfree(hr_dev->irq_names[i]);
+       kfree(eq_table->eq);
+
+       return ret;
+}
+
+/*
+ * hns_roce_v2_cleanup_eq_table - tear down everything built by
+ * hns_roce_v2_init_eq_table: mask the interrupts, release every irq,
+ * destroy each EQ context in hardware and free its buffers, then free the
+ * irq name strings and the EQ array itself.
+ */
+static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
+{
+       struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
+       int irq_num;
+       int eq_num;
+       int i;
+
+       eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
+       irq_num = eq_num + hr_dev->caps.num_other_vectors;
+
+       /* Disable irq */
+       hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
+
+       /* Abnormal-event irqs were registered with hr_dev as cookie. */
+       for (i = 0; i < hr_dev->caps.num_other_vectors; i++)
+               free_irq(hr_dev->irq[i], hr_dev);
+
+       for (i = 0; i < eq_num; i++) {
+               /* Destroy the EQ context in hardware first ... */
+               hns_roce_v2_destroy_eqc(hr_dev, i);
+
+               /* ... then release its irq ... */
+               free_irq(eq_table->eq[i].irq, &eq_table->eq[i]);
+
+               /* ... and finally its DMA buffers. */
+               hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]);
+       }
+
+       for (i = 0; i < irq_num; i++)
+               kfree(hr_dev->irq_names[i]);
+
+       kfree(eq_table->eq);
+}
+
+/* Hardware operations table for HIP08 (hw v2) devices, consumed by the
+ * common hns_roce core through hr_dev->hw.
+ */
+static const struct hns_roce_hw hns_roce_hw_v2 = {
+       .cmq_init = hns_roce_v2_cmq_init,
+       .cmq_exit = hns_roce_v2_cmq_exit,
+       .hw_profile = hns_roce_v2_profile,
+       .post_mbox = hns_roce_v2_post_mbox,
+       .chk_mbox = hns_roce_v2_chk_mbox,
+       .set_gid = hns_roce_v2_set_gid,
+       .set_mac = hns_roce_v2_set_mac,
+       .write_mtpt = hns_roce_v2_write_mtpt,
+       .rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt,
+       .write_cqc = hns_roce_v2_write_cqc,
+       .set_hem = hns_roce_v2_set_hem,
+       .clear_hem = hns_roce_v2_clear_hem,
+       .modify_qp = hns_roce_v2_modify_qp,
+       .query_qp = hns_roce_v2_query_qp,
+       .destroy_qp = hns_roce_v2_destroy_qp,
+       .modify_cq = hns_roce_v2_modify_cq,
+       .post_send = hns_roce_v2_post_send,
+       .post_recv = hns_roce_v2_post_recv,
+       .req_notify_cq = hns_roce_v2_req_notify_cq,
+       .poll_cq = hns_roce_v2_poll_cq,
+       .init_eq = hns_roce_v2_init_eq_table,
+       .cleanup_eq = hns_roce_v2_cleanup_eq_table,
+};
+
+/* PCI device IDs of HNS3 NICs whose RDMA engine this driver serves; matched
+ * via pci_match_id() in hns_roce_hw_v2_get_cfg() rather than by an own
+ * pci_driver instance.
+ */
+static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
+       {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
+       {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
+       {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
+       /* required last entry */
+       {0, }
+};
+
+MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);
+
+static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
+                                 struct hnae3_handle *handle)
+{
+       const struct pci_device_id *id;
+       int i;
+
+       id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev);
+       if (!id) {
+               dev_err(hr_dev->dev, "device is not compatible!\n");
+               return -ENXIO;
+       }
+
+       hr_dev->hw = &hns_roce_hw_v2;
+       hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
+       hr_dev->odb_offset = hr_dev->sdb_offset;
+
+       /* Get info from NIC driver. */
+       hr_dev->reg_base = handle->rinfo.roce_io_base;
+       hr_dev->caps.num_ports = 1;
+       hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
+       hr_dev->iboe.phy_port[0] = 0;
+
+       addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid,
+                           hr_dev->iboe.netdevs[0]->dev_addr);
+
+       for (i = 0; i < HNS_ROCE_V2_MAX_IRQ_NUM; i++)
+               hr_dev->irq[i] = pci_irq_vector(handle->pdev,
+                                               i + handle->rinfo.base_vector);
 
        /* cmd issue mode: 0 is poll, 1 is event */
-       hr_dev->cmd_mod = 0;
+       hr_dev->cmd_mod = 1;
        hr_dev->loop_idc = 0;
 
        return 0;