RDMA/hns: Modify fields of struct hns_roce_srq
author    Yixian Liu <liuyixian@huawei.com>
          Tue, 5 Nov 2019 11:07:57 +0000 (19:07 +0800)
committer Jason Gunthorpe <jgg@mellanox.com>
          Fri, 8 Nov 2019 20:37:54 +0000 (16:37 -0400)
Use wqe_cnt, which denotes the queue size of the SRQ, instead of max,
and remove wqe_ctr, which is not used.

Link: https://lore.kernel.org/r/1572952082-6681-5-git-send-email-liweihang@hisilicon.com
Signed-off-by: Yixian Liu <liuyixian@huawei.com>
Signed-off-by: Weihang Li <liweihang@hisilicon.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
drivers/infiniband/hw/hns/hns_roce_device.h
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
drivers/infiniband/hw/hns/hns_roce_srq.c
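
Context for the diffs below: the renamed field always holds a power-of-two
queue depth (roundup_pow_of_two(max_wr + 1) in hns_roce_create_srq()), which
is what lets the receive path wrap ring indices with "& (srq->wqe_cnt - 1)"
instead of a modulo. A minimal standalone sketch of that invariant (userspace
C; the roundup helper is a stand-in for the kernel's roundup_pow_of_two()):

    #include <stdio.h>

    static unsigned int roundup_pow_of_two_u32(unsigned int n)
    {
            unsigned int r = 1;

            while (r < n)
                    r <<= 1;
            return r;
    }

    int main(void)
    {
            unsigned int max_wr = 100;      /* depth requested by the user */
            unsigned int wqe_cnt = roundup_pow_of_two_u32(max_wr + 1);
            unsigned int ind = 0;
            int i;

            /*
             * wqe_cnt is a power of two (128 here), so wqe_cnt - 1 is an
             * all-ones mask and "& (wqe_cnt - 1)" is a cheap "% wqe_cnt".
             * This is the wrap used in hns_roce_v2_post_srq_recv() below.
             */
            for (i = 0; i < 130; i++)
                    ind = (ind + 1) & (wqe_cnt - 1);

            printf("wqe_cnt=%u, ind after 130 posts=%u\n", wqe_cnt, ind);
            return 0;
    }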

diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index c461cffa97216732c98f02cf489bec2c4462143a..ce777ce06ea2ea13501910f7228e4766b907fb31 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -517,9 +517,8 @@ struct hns_roce_idx_que {
 
 struct hns_roce_srq {
        struct ib_srq           ibsrq;
-       void (*event)(struct hns_roce_srq *srq, enum hns_roce_event event);
        unsigned long           srqn;
-       int                     max;
+       u32                     wqe_cnt;
        int                     max_gs;
        int                     wqe_shift;
        void __iomem            *db_reg_l;
@@ -535,8 +534,8 @@ struct hns_roce_srq {
        spinlock_t              lock;
        int                     head;
        int                     tail;
-       u16                     wqe_ctr;
        struct mutex            mutex;
+       void (*event)(struct hns_roce_srq *srq, enum hns_roce_event event);
 };
 
 struct hns_roce_uar_table {
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 0883fce0e4005418f9bfc49a72706418876440b1..4700bac1c6e22902c3a4c84a5e4893961c4135dc 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -6040,7 +6040,7 @@ static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
                       hr_dev->caps.srqwqe_hop_num));
        roce_set_field(srq_context->byte_4_srqn_srqst,
                       SRQC_BYTE_4_SRQ_SHIFT_M, SRQC_BYTE_4_SRQ_SHIFT_S,
-                      ilog2(srq->max));
+                      ilog2(srq->wqe_cnt));
 
        roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQN_M,
                       SRQC_BYTE_4_SRQN_S, srq->srqn);
@@ -6126,7 +6126,7 @@ static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
        int ret;
 
        if (srq_attr_mask & IB_SRQ_LIMIT) {
-               if (srq_attr->srq_limit >= srq->max)
+               if (srq_attr->srq_limit >= srq->wqe_cnt)
                        return -EINVAL;
 
                mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
@@ -6186,7 +6186,7 @@ static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
                                  SRQC_BYTE_8_SRQ_LIMIT_WL_S);
 
        attr->srq_limit = limit_wl;
-       attr->max_wr    = srq->max - 1;
+       attr->max_wr    = srq->wqe_cnt - 1;
        attr->max_sge   = srq->max_gs;
 
        memcpy(srq_context, mailbox->buf, sizeof(*srq_context));
@@ -6239,7 +6239,7 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
 
        spin_lock_irqsave(&srq->lock, flags);
 
-       ind = srq->head & (srq->max - 1);
+       ind = srq->head & (srq->wqe_cnt - 1);
 
        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                if (unlikely(wr->num_sge > srq->max_gs)) {
@@ -6254,7 +6254,7 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
                        break;
                }
 
-               wqe_idx = find_empty_entry(&srq->idx_que, srq->max);
+               wqe_idx = find_empty_entry(&srq->idx_que, srq->wqe_cnt);
                if (wqe_idx < 0) {
                        ret = -ENOMEM;
                        *bad_wr = wr;
@@ -6278,7 +6278,7 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
                }
 
                srq->wrid[wqe_idx] = wr->wr_id;
-               ind = (ind + 1) & (srq->max - 1);
+               ind = (ind + 1) & (srq->wqe_cnt - 1);
        }
 
        if (likely(nreq)) {
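
The post_srq_recv hunks above only rename the size argument passed to
find_empty_entry(), which hands out free WQE slots from the bitmap allocated
in hns_roce_create_idx_que(). A rough userspace approximation of that bitmap
index queue, with hypothetical layout for illustration (not the driver's
actual implementation):

    #include <stdint.h>
    #include <stdlib.h>
    #include <stdio.h>

    struct idx_que {
            uint64_t *bitmap;
            unsigned int nbits;     /* == wqe_cnt */
    };

    static int idx_que_init(struct idx_que *q, unsigned int wqe_cnt)
    {
            q->nbits = wqe_cnt;
            q->bitmap = calloc((wqe_cnt + 63) / 64, sizeof(uint64_t));
            return q->bitmap ? 0 : -1;
    }

    /* Find a free slot, mark it used, return its index; -1 when full. */
    static int find_empty_entry(struct idx_que *q)
    {
            unsigned int i;

            for (i = 0; i < q->nbits; i++) {
                    if (!(q->bitmap[i / 64] & (1ULL << (i % 64)))) {
                            q->bitmap[i / 64] |= 1ULL << (i % 64);
                            return (int)i;
                    }
            }
            return -1;      /* caller maps this to -ENOMEM */
    }

    int main(void)
    {
            struct idx_que q;

            if (idx_que_init(&q, 128))
                    return 1;
            printf("first free wqe_idx = %d\n", find_empty_entry(&q));
            printf("next free wqe_idx  = %d\n", find_empty_entry(&q));
            free(q.bitmap);
            return 0;
    }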
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index d96041d806f63b302fb0c45db3538a102ac8a282..6f9d1d250e4a14f63121cb6558586c787e8b5a3f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -255,7 +255,7 @@ static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq,
        struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
        struct hns_roce_idx_que *idx_que = &srq->idx_que;
 
-       idx_que->bitmap = bitmap_zalloc(srq->max, GFP_KERNEL);
+       idx_que->bitmap = bitmap_zalloc(srq->wqe_cnt, GFP_KERNEL);
        if (!idx_que->bitmap)
                return -ENOMEM;
 
@@ -281,7 +281,7 @@ static int create_kernel_srq(struct hns_roce_srq *srq, int srq_buf_size)
                return -ENOMEM;
 
        srq->head = 0;
-       srq->tail = srq->max - 1;
+       srq->tail = srq->wqe_cnt - 1;
 
        ret = hns_roce_mtt_init(hr_dev, srq->buf.npages, srq->buf.page_shift,
                                &srq->mtt);
@@ -312,7 +312,7 @@ static int create_kernel_srq(struct hns_roce_srq *srq, int srq_buf_size)
        if (ret)
                goto err_kernel_idx_buf;
 
-       srq->wrid = kvmalloc_array(srq->max, sizeof(u64), GFP_KERNEL);
+       srq->wrid = kvmalloc_array(srq->wqe_cnt, sizeof(u64), GFP_KERNEL);
        if (!srq->wrid) {
                ret = -ENOMEM;
                goto err_kernel_idx_buf;
@@ -358,7 +358,7 @@ static void destroy_kernel_srq(struct hns_roce_dev *hr_dev,
 }
 
 int hns_roce_create_srq(struct ib_srq *ib_srq,
-                       struct ib_srq_init_attr *srq_init_attr,
+                       struct ib_srq_init_attr *init_attr,
                        struct ib_udata *udata)
 {
        struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
@@ -370,24 +370,24 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
        u32 cqn;
 
        /* Check the actual SRQ wqe and SRQ sge num */
-       if (srq_init_attr->attr.max_wr >= hr_dev->caps.max_srq_wrs ||
-           srq_init_attr->attr.max_sge > hr_dev->caps.max_srq_sges)
+       if (init_attr->attr.max_wr >= hr_dev->caps.max_srq_wrs ||
+           init_attr->attr.max_sge > hr_dev->caps.max_srq_sges)
                return -EINVAL;
 
        mutex_init(&srq->mutex);
        spin_lock_init(&srq->lock);
 
-       srq->max = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
-       srq->max_gs = srq_init_attr->attr.max_sge;
+       srq->wqe_cnt = roundup_pow_of_two(init_attr->attr.max_wr + 1);
+       srq->max_gs = init_attr->attr.max_sge;
 
        srq_desc_size = max(16, 16 * srq->max_gs);
 
        srq->wqe_shift = ilog2(srq_desc_size);
 
-       srq_buf_size = srq->max * srq_desc_size;
+       srq_buf_size = srq->wqe_cnt * srq_desc_size;
 
        srq->idx_que.entry_sz = HNS_ROCE_IDX_QUE_ENTRY_SZ;
-       srq->idx_que.buf_size = srq->max * srq->idx_que.entry_sz;
+       srq->idx_que.buf_size = srq->wqe_cnt * srq->idx_que.entry_sz;
        srq->mtt.mtt_type = MTT_TYPE_SRQWQE;
        srq->idx_que.mtt.mtt_type = MTT_TYPE_IDX;
 
@@ -405,8 +405,8 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
                }
        }
 
-       cqn = ib_srq_has_cq(srq_init_attr->srq_type) ?
-             to_hr_cq(srq_init_attr->ext.cq)->cqn : 0;
+       cqn = ib_srq_has_cq(init_attr->srq_type) ?
+             to_hr_cq(init_attr->ext.cq)->cqn : 0;
 
        srq->db_reg_l = hr_dev->reg_base + SRQ_DB_REG;
 
@@ -453,7 +453,7 @@ void hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
                hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
        } else {
                kvfree(srq->wrid);
-               hns_roce_buf_free(hr_dev, srq->max << srq->wqe_shift,
+               hns_roce_buf_free(hr_dev, srq->wqe_cnt << srq->wqe_shift,
                                  &srq->buf);
        }
        ib_umem_release(srq->idx_que.umem);
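
Putting the create-path arithmetic together, a worked example of the sizes
hns_roce_create_srq() derives (userspace sketch; the idx entry size is
assumed to be 4 bytes here and may differ from the driver's
HNS_ROCE_IDX_QUE_ENTRY_SZ):

    #include <stdio.h>

    #define IDX_QUE_ENTRY_SZ 4  /* assumed stand-in for HNS_ROCE_IDX_QUE_ENTRY_SZ */

    static unsigned int roundup_pow_of_two_u32(unsigned int n)
    {
            unsigned int r = 1;

            while (r < n)
                    r <<= 1;
            return r;
    }

    static unsigned int ilog2_u32(unsigned int n)
    {
            unsigned int s = 0;

            while (n >>= 1)
                    s++;
            return s;
    }

    int main(void)
    {
            unsigned int max_wr = 100, max_sge = 4; /* example ib_srq_init_attr */
            unsigned int wqe_cnt = roundup_pow_of_two_u32(max_wr + 1);      /* 128 */
            unsigned int desc_size = 16 * max_sge > 16 ? 16 * max_sge : 16; /* 64 */
            unsigned int wqe_shift = ilog2_u32(desc_size);                  /* 6 */

            printf("wqe_cnt         = %u\n", wqe_cnt);
            printf("wqe buf size    = %u\n", wqe_cnt << wqe_shift);         /* 8192 */
            printf("idx buf size    = %u\n", wqe_cnt * IDX_QUE_ENTRY_SZ);   /* 512 */
            printf("reported max_wr = %u\n", wqe_cnt - 1);  /* 127, see query_srq */
            return 0;
    }

The +1 at create time and the -1 when reporting max_wr back in
hns_roce_v2_query_srq() are consistent: after rounding up, the user is always
offered at least the depth they asked for, less the one reserved slot.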