RDMA/hns: Use IDA interface to manage uar index
author    Yangyang Li <liyangyang20@huawei.com>
          Thu, 19 Aug 2021 01:36:18 +0000 (09:36 +0800)
committer Jason Gunthorpe <jgg@nvidia.com>
          Tue, 24 Aug 2021 12:15:16 +0000 (09:15 -0300)
Switch uar index allocation and release from hns' own bitmap interface to
the IDA interface.

Link: https://lore.kernel.org/r/1629336980-17499-2-git-send-email-liangwenpeng@huawei.com
Signed-off-by: Yangyang Li <liyangyang20@huawei.com>
Signed-off-by: Wenpeng Liang <liangwenpeng@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
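
For readers unfamiliar with the IDA API, the change boils down to the
init/alloc/free/destroy pattern sketched below. This is an illustrative
sketch only, not part of the patch; the example_* names are hypothetical and
simply mirror the hns_roce_ida bookkeeping (an IDA plus a min/max index
range):

    #include <linux/idr.h>
    #include <linux/gfp.h>

    /*
     * Hypothetical bookkeeping mirroring the hns_roce_ida pattern:
     * an IDA plus the [min, max] range of allocatable indexes.
     */
    struct example_ida {
            struct ida ida;
            u32 min;        /* lowest usable index (reserved entries excluded) */
            u32 max;        /* highest usable index */
    };

    static void example_ida_init(struct example_ida *t, u32 reserved, u32 total)
    {
            ida_init(&t->ida);
            t->min = reserved;
            t->max = total - 1;
    }

    static int example_ida_get(struct example_ida *t)
    {
            /* Returns an unused id in [min, max] or a negative errno. */
            return ida_alloc_range(&t->ida, t->min, t->max, GFP_KERNEL);
    }

    static void example_ida_put(struct example_ida *t, int id)
    {
            ida_free(&t->ida, id);
    }

    static void example_ida_cleanup(struct example_ida *t)
    {
            /* Frees the IDA's internal storage; replaces a dedicated
             * cleanup helper such as the removed uar table cleanup. */
            ida_destroy(&t->ida);
    }

Because ida_destroy() is all the teardown an IDA needs, the patch can drop
the separate hns_roce_uar_free()/hns_roce_cleanup_uar_table() helpers and
call ida_free()/ida_destroy() directly at the call sites.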
drivers/infiniband/hw/hns/hns_roce_alloc.c
drivers/infiniband/hw/hns/hns_roce_device.h
drivers/infiniband/hw/hns/hns_roce_main.c
drivers/infiniband/hw/hns/hns_roce_pd.c

diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
index 1b02d3bc9bae2d218b92e88eb0f573f7ad7799c4..6ae506e9773795f006476caea53d4f6ad93b1527 100644
@@ -253,5 +253,5 @@ void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
        hns_roce_cleanup_cq_table(hr_dev);
        ida_destroy(&hr_dev->mr_table.mtpt_ida.ida);
        ida_destroy(&hr_dev->pd_ida.ida);
-       hns_roce_cleanup_uar_table(hr_dev);
+       ida_destroy(&hr_dev->uar_ida.ida);
 }
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 0c3eb11639778d1cb0262cb625e03e8ab6ea873c..01906f3d0889d14a8fff851ed709f837352d1738 100644
@@ -963,7 +963,7 @@ struct hns_roce_dev {
        struct hns_roce_cmdq    cmd;
        struct hns_roce_ida pd_ida;
        struct hns_roce_ida xrcd_ida;
-       struct hns_roce_uar_table uar_table;
+       struct hns_roce_ida uar_ida;
        struct hns_roce_mr_table  mr_table;
        struct hns_roce_cq_table  cq_table;
        struct hns_roce_srq_table srq_table;
@@ -1118,10 +1118,8 @@ static inline u8 get_tclass(const struct ib_global_route *grh)
               grh->traffic_class >> DSCP_SHIFT : grh->traffic_class;
 }
 
-int hns_roce_init_uar_table(struct hns_roce_dev *dev);
+void hns_roce_init_uar_table(struct hns_roce_dev *dev);
 int hns_roce_uar_alloc(struct hns_roce_dev *dev, struct hns_roce_uar *uar);
-void hns_roce_uar_free(struct hns_roce_dev *dev, struct hns_roce_uar *uar);
-void hns_roce_cleanup_uar_table(struct hns_roce_dev *dev);
 
 int hns_roce_cmd_init(struct hns_roce_dev *hr_dev);
 void hns_roce_cmd_cleanup(struct hns_roce_dev *hr_dev);
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 23b88a5a372f9e26ff35b26eb538a6ebaea7db01..7ab685a5027980c52359f52807d08754baa1157d 100644
@@ -325,7 +325,7 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
        return 0;
 
 error_fail_copy_to_udata:
-       hns_roce_uar_free(hr_dev, &context->uar);
+       ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx);
 
 error_fail_uar_alloc:
        return ret;
@@ -334,8 +334,9 @@ error_fail_uar_alloc:
 static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
 {
        struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
+       struct hns_roce_dev *hr_dev = to_hr_dev(ibcontext->device);
 
-       hns_roce_uar_free(to_hr_dev(ibcontext->device), &context->uar);
+       ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx);
 }
 
 static int hns_roce_mmap(struct ib_ucontext *context,
@@ -737,11 +738,7 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
                mutex_init(&hr_dev->pgdir_mutex);
        }
 
-       ret = hns_roce_init_uar_table(hr_dev);
-       if (ret) {
-               dev_err(dev, "Failed to initialize uar table. aborting\n");
-               return ret;
-       }
+       hns_roce_init_uar_table(hr_dev);
 
        ret = hns_roce_uar_alloc(hr_dev, &hr_dev->priv_uar);
        if (ret) {
@@ -780,10 +777,9 @@ err_qp_table_free:
                ida_destroy(&hr_dev->xrcd_ida.ida);
 
        ida_destroy(&hr_dev->pd_ida.ida);
-       hns_roce_uar_free(hr_dev, &hr_dev->priv_uar);
 
 err_uar_table_free:
-       hns_roce_cleanup_uar_table(hr_dev);
+       ida_destroy(&hr_dev->uar_ida.ida);
        return ret;
 }
 
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index ea566363098564fa4bc3ec9412cfd5ea6d980f4d..81ffad77ae420b9ec3a8c19ed09bb9e8a2f410fa 100644
@@ -85,13 +85,18 @@ int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
 
 int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
 {
+       struct hns_roce_ida *uar_ida = &hr_dev->uar_ida;
        struct resource *res;
-       int ret;
+       int id;
 
        /* Using bitmap to manager UAR index */
-       ret = hns_roce_bitmap_alloc(&hr_dev->uar_table.bitmap, &uar->logic_idx);
-       if (ret)
+       id = ida_alloc_range(&uar_ida->ida, uar_ida->min, uar_ida->max,
+                            GFP_KERNEL);
+       if (id < 0) {
+               ibdev_err(&hr_dev->ib_dev, "failed to alloc uar id(%d).\n", id);
                return -ENOMEM;
+       }
+       uar->logic_idx = (unsigned long)id;
 
        if (uar->logic_idx > 0 && hr_dev->caps.phy_num_uars > 1)
                uar->index = (uar->logic_idx - 1) %
@@ -102,6 +107,7 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
        if (!dev_is_pci(hr_dev->dev)) {
                res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
                if (!res) {
+                       ida_free(&uar_ida->ida, id);
                        dev_err(&hr_dev->pdev->dev, "memory resource not found!\n");
                        return -EINVAL;
                }
@@ -114,22 +120,13 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
        return 0;
 }
 
-void hns_roce_uar_free(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
+void hns_roce_init_uar_table(struct hns_roce_dev *hr_dev)
 {
-       hns_roce_bitmap_free(&hr_dev->uar_table.bitmap, uar->logic_idx);
-}
+       struct hns_roce_ida *uar_ida = &hr_dev->uar_ida;
 
-int hns_roce_init_uar_table(struct hns_roce_dev *hr_dev)
-{
-       return hns_roce_bitmap_init(&hr_dev->uar_table.bitmap,
-                                   hr_dev->caps.num_uars,
-                                   hr_dev->caps.num_uars - 1,
-                                   hr_dev->caps.reserved_uars, 0);
-}
-
-void hns_roce_cleanup_uar_table(struct hns_roce_dev *hr_dev)
-{
-       hns_roce_bitmap_cleanup(&hr_dev->uar_table.bitmap);
+       ida_init(&uar_ida->ida);
+       uar_ida->max = hr_dev->caps.num_uars - 1;
+       uar_ida->min = hr_dev->caps.reserved_uars;
 }
 
 static int hns_roce_xrcd_alloc(struct hns_roce_dev *hr_dev, u32 *xrcdn)