git.proxmox.com Git - mirror_ubuntu-kernels.git/commitdiff
IB/core: Ensure an invalidate_range callback on ODP MR
author Ira Weiny <ira.weiny@intel.com>
Wed, 13 Mar 2019 19:05:59 +0000 (12:05 -0700)
committer Jason Gunthorpe <jgg@mellanox.com>
Tue, 26 Mar 2019 19:39:40 +0000 (16:39 -0300)
No device supports ODP MR without an invalidate_range callback.

Warn on any device which attempts to support ODP without supplying
this callback.

Then we can remove the checks for the callback within the code.
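
As a point of reference, a minimal sketch of how an ODP-capable
provider is expected to supply the callback when it sets up a
ucontext (loosely modeled on mlx5; my_drv_invalidate_range is an
illustrative name, not part of this patch):

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/* An ODP-capable driver installs its MR invalidation handler on
	 * the ucontext it allocates; without it, ib_umem_get() will now
	 * WARN and fail ODP registration with -EINVAL.
	 */
	context->invalidate_range = &my_drv_invalidate_range;
#endif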

This stems from the discussion

https://www.spinics.net/lists/linux-rdma/msg76460.html

...which concluded this code was no longer necessary.

Acked-by: John Hubbard <jhubbard@nvidia.com>
Reviewed-by: Haggai Eran <haggaie@mellanox.com>
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
drivers/infiniband/core/umem.c
drivers/infiniband/core/umem_odp.c

index fe5551562dbcd344d9567feda67d394fd0d69fe1..89a7d57f9fa5f225a8c47f60e1c36b509680afd2 100644 (file)
@@ -138,6 +138,11 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
        mmgrab(mm);
 
        if (access & IB_ACCESS_ON_DEMAND) {
+               if (WARN_ON_ONCE(!context->invalidate_range)) {
+                       ret = -EINVAL;
+                       goto umem_kfree;
+               }
+
                ret = ib_umem_odp_get(to_ib_umem_odp(umem), access);
                if (ret)
                        goto umem_kfree;
index e6ec79ad9cc8cd8820f2bed98ed3c14ffb595169..6f8c36fcda782eaaa9642a4514dd08c066431f9d 100644 (file)
@@ -241,7 +241,7 @@ static struct ib_ucontext_per_mm *alloc_per_mm(struct ib_ucontext *ctx,
        per_mm->mm = mm;
        per_mm->umem_tree = RB_ROOT_CACHED;
        init_rwsem(&per_mm->umem_rwsem);
-       per_mm->active = ctx->invalidate_range;
+       per_mm->active = true;
 
        rcu_read_lock();
        per_mm->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
@@ -503,7 +503,6 @@ static int ib_umem_odp_map_dma_single_page(
        struct ib_umem *umem = &umem_odp->umem;
        struct ib_device *dev = umem->context->device;
        dma_addr_t dma_addr;
-       int stored_page = 0;
        int remove_existing_mapping = 0;
        int ret = 0;
 
@@ -528,7 +527,6 @@ static int ib_umem_odp_map_dma_single_page(
                umem_odp->dma_list[page_index] = dma_addr | access_mask;
                umem_odp->page_list[page_index] = page;
                umem->npages++;
-               stored_page = 1;
        } else if (umem_odp->page_list[page_index] == page) {
                umem_odp->dma_list[page_index] |= access_mask;
        } else {
@@ -540,11 +538,9 @@ static int ib_umem_odp_map_dma_single_page(
        }
 
 out:
-       /* On Demand Paging - avoid pinning the page */
-       if (umem->context->invalidate_range || !stored_page)
-               put_page(page);
+       put_page(page);
 
-       if (remove_existing_mapping && umem->context->invalidate_range) {
+       if (remove_existing_mapping) {
                ib_umem_notifier_start_account(umem_odp);
                umem->context->invalidate_range(
                        umem_odp,
@@ -754,9 +750,6 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
                                 */
                                set_page_dirty(head_page);
                        }
-                       /* on demand pinning support */
-                       if (!umem->context->invalidate_range)
-                               put_page(page);
                        umem_odp->page_list[idx] = NULL;
                        umem_odp->dma_list[idx] = 0;
                        umem->npages--;
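
With the callback guaranteed, the map path above can drop its page
reference unconditionally and the unmap path no longer needs the
"on demand pinning" put_page. A rough sketch of what such a callback
does (illustrative only, not part of this patch; a real driver such as
mlx5 also zaps its hardware translation entries for the range before
releasing the pages):

static void my_drv_invalidate_range(struct ib_umem_odp *umem_odp,
				    unsigned long start, unsigned long end)
{
	/* Tear down driver/HW mappings for [start, end) here, then
	 * release the DMA-mapped pages tracked by the umem.
	 */
	ib_umem_odp_unmap_dma_pages(umem_odp, start, end);
}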