RDMA/odp: Provide ib_umem_odp_release() to undo the allocs
author    Jason Gunthorpe <jgg@mellanox.com>    Mon, 19 Aug 2019 11:17:05 +0000 (14:17 +0300)
committer Jason Gunthorpe <jgg@mellanox.com>    Wed, 21 Aug 2019 17:08:42 +0000 (14:08 -0300)
Now that there are allocator APIs that return the ib_umem_odp directly,
it should be freed through a matching umem_odp release function as well.

Link: https://lore.kernel.org/r/20190819111710.18440-8-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
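
For context, a minimal sketch of the alloc/release pairing this patch
establishes, assuming the ib_umem_odp_get() signature as of this point in
the series; the surrounding function and variable names are hypothetical:

    #include <rdma/ib_umem_odp.h>

    /* Hypothetical caller: pair the ODP allocator with the ODP release. */
    static int my_driver_map_odp(struct ib_udata *udata, unsigned long start,
                                 size_t length, int access_flags)
    {
            struct ib_umem_odp *odp;

            odp = ib_umem_odp_get(udata, start, length, access_flags);
            if (IS_ERR(odp))
                    return PTR_ERR(odp);

            /* ... set up the MR against odp ... */

            /*
             * Tear down with ib_umem_odp_release(), which after this patch
             * also does the mmdrop() and kfree() that the generic
             * __ib_umem_release_tail() used to do.
             */
            ib_umem_odp_release(odp);
            return 0;
    }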
drivers/infiniband/core/umem.c
drivers/infiniband/core/umem_odp.c
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/mlx5/odp.c

drivers/infiniband/core/umem.c
index 9a39c45cd1e6660e324f2103c1e17467ecd294c8..37eb8643ec2974f05aa748138bedd157ebdca64b 100644 (file)
@@ -326,15 +326,6 @@ umem_kfree:
 }
 EXPORT_SYMBOL(ib_umem_get);
 
-static void __ib_umem_release_tail(struct ib_umem *umem)
-{
-       mmdrop(umem->owning_mm);
-       if (umem->is_odp)
-               kfree(to_ib_umem_odp(umem));
-       else
-               kfree(umem);
-}
-
 /**
  * ib_umem_release - release memory pinned with ib_umem_get
  * @umem: umem struct to release
@@ -343,17 +334,14 @@ void ib_umem_release(struct ib_umem *umem)
 {
        if (!umem)
                return;
-
-       if (umem->is_odp) {
-               ib_umem_odp_release(to_ib_umem_odp(umem));
-               __ib_umem_release_tail(umem);
-               return;
-       }
+       if (umem->is_odp)
+               return ib_umem_odp_release(to_ib_umem_odp(umem));
 
        __ib_umem_release(umem->context->device, umem, 1);
 
        atomic64_sub(ib_umem_num_pages(umem), &umem->owning_mm->pinned_vm);
-       __ib_umem_release_tail(umem);
+       mmdrop(umem->owning_mm);
+       kfree(umem);
 }
 EXPORT_SYMBOL(ib_umem_release);
 
drivers/infiniband/core/umem_odp.c
index 6a88bd0fcb33200152665225a321e3f564a673e9..3d4bbafa441cbe010a52e9b264da2d469351362f 100644 (file)
@@ -523,7 +523,10 @@ void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
                vfree(umem_odp->page_list);
        }
        put_per_mm(umem_odp);
+       mmdrop(umem_odp->umem.owning_mm);
+       kfree(umem_odp);
 }
+EXPORT_SYMBOL(ib_umem_odp_release);
 
 /*
  * Map for DMA and insert a single page into the on-demand paging page tables.
drivers/infiniband/hw/mlx5/mr.c
index fc1106ddc3f6d0cd1c2de9d56930fd35f4e7ef4d..b7da619614e4511f3d6ec43dd9213d95530178b3 100644 (file)
@@ -1629,7 +1629,7 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
                 * so that there will not be any invalidations in
                 * flight, looking at the *mr struct.
                 */
-               ib_umem_release(umem);
+               ib_umem_odp_release(umem_odp);
                atomic_sub(npages, &dev->mdev->priv.reg_pages);
 
                /* Avoid double-freeing the umem. */
drivers/infiniband/hw/mlx5/odp.c
index ad209ae44f05d3d516f79689a0d8aff58eb88e2d..a660dc2b21f43d1c5aca87515bc8e91d532fde67 100644 (file)
@@ -206,7 +206,7 @@ static void mr_leaf_free_action(struct work_struct *work)
        mr->parent = NULL;
        synchronize_srcu(&mr->dev->mr_srcu);
 
-       ib_umem_release(&odp->umem);
+       ib_umem_odp_release(odp);
        if (imr->live)
                mlx5_ib_update_xlt(imr, idx, 1, 0,
                                   MLX5_IB_UPD_XLT_INDIRECT |
@@ -472,7 +472,7 @@ next_mr:
                                        mr->access_flags);
                if (IS_ERR(mtt)) {
                        mutex_unlock(&odp_mr->umem_mutex);
-                       ib_umem_release(&odp->umem);
+                       ib_umem_odp_release(odp);
                        return ERR_CAST(mtt);
                }
 
@@ -526,7 +526,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
 
        imr = implicit_mr_alloc(&pd->ibpd, umem_odp, 1, access_flags);
        if (IS_ERR(imr)) {
-               ib_umem_release(&umem_odp->umem);
+               ib_umem_odp_release(umem_odp);
                return ERR_CAST(imr);
        }