]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/commitdiff
RDMA/hns: Fix 0-length sge calculation error
authorLang Cheng <chenglang@huawei.com>
Sat, 28 Nov 2020 10:22:37 +0000 (18:22 +0800)
committerJason Gunthorpe <jgg@nvidia.com>
Wed, 2 Dec 2020 00:58:42 +0000 (20:58 -0400)
One RC SQ WQE can store 2 sges but UD can't, so ignore the 2 valid sges of
wr.sg_list for RC which have already been filled into the WQE before setting
the extended sge.  Neither RC nor UD can contain 0-length sges, so these
0-length sges should be skipped.

Fixes: 54d6638765b0 ("RDMA/hns: Optimize WQE buffer size calculating process")
Link: https://lore.kernel.org/r/1606558959-48510-2-git-send-email-liweihang@huawei.com
Signed-off-by: Lang Cheng <chenglang@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/hw/hns/hns_roce_hw_v2.c

index 8575ad7acce2376cfbd1d49f075943394a1921b2..8d37067a736d6f2929f4fbca5d3b80e779b2d9d8 100644 (file)
@@ -214,25 +214,20 @@ static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
        return 0;
 }
 
-static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
-                          unsigned int *sge_ind, unsigned int valid_num_sge)
+static void set_extend_sge(struct hns_roce_qp *qp, struct ib_sge *sge,
+                          unsigned int *sge_ind, unsigned int cnt)
 {
        struct hns_roce_v2_wqe_data_seg *dseg;
-       unsigned int cnt = valid_num_sge;
-       struct ib_sge *sge = wr->sg_list;
        unsigned int idx = *sge_ind;
 
-       if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
-               cnt -= HNS_ROCE_SGE_IN_WQE;
-               sge += HNS_ROCE_SGE_IN_WQE;
-       }
-
        while (cnt > 0) {
                dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
-               set_data_seg_v2(dseg, sge);
-               idx++;
+               if (likely(sge->length)) {
+                       set_data_seg_v2(dseg, sge);
+                       idx++;
+                       cnt--;
+               }
                sge++;
-               cnt--;
        }
 
        *sge_ind = idx;
@@ -340,7 +335,8 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
                        }
                }
 
-               set_extend_sge(qp, wr, sge_ind, valid_num_sge);
+               set_extend_sge(qp, wr->sg_list + i, sge_ind,
+                              valid_num_sge - HNS_ROCE_SGE_IN_WQE);
        }
 
        roce_set_field(rc_sq_wqe->byte_16,
@@ -503,7 +499,7 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp,
        if (ret)
                return ret;
 
-       set_extend_sge(qp, wr, &curr_idx, valid_num_sge);
+       set_extend_sge(qp, wr->sg_list, &curr_idx, valid_num_sge);
 
        /*
         * The pipeline can sequentially post all valid WQEs into WQ buffer,