mmap locking API: convert mmap_sem call sites missed by coccinelle
author     Michel Lespinasse <walken@google.com>
           Tue, 9 Jun 2020 04:33:29 +0000 (21:33 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Tue, 9 Jun 2020 16:39:14 +0000 (09:39 -0700)
Convert the last few remaining mmap_sem rwsem calls to use the new mmap
locking API.  These were missed by coccinelle for some reason (I think
coccinelle does not support some of the preprocessor constructs in these
files?).
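
As a sketch of the conversion pattern (not part of this patch; the caller
name below is only illustrative), the wrappers from
include/linux/mmap_lock.h take the struct mm_struct pointer directly
instead of open-coding the rwsem:

	#include <linux/mmap_lock.h>

	static void example_vma_walk(struct mm_struct *mm)
	{
		/* was: down_read(&mm->mmap_sem); */
		mmap_read_lock(mm);

		/* ... inspect VMAs under the mmap read lock ... */

		/* was: up_read(&mm->mmap_sem); */
		mmap_read_unlock(mm);
	}

The same one-to-one mapping covers down_read_trylock() ->
mmap_read_trylock() and down_read_killable() -> mmap_read_lock_killable(),
as seen in the vfio and /proc hunks below.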

[akpm@linux-foundation.org: convert linux-next leftovers]
[akpm@linux-foundation.org: more linux-next leftovers]
[akpm@linux-foundation.org: more linux-next leftovers]

Signed-off-by: Michel Lespinasse <walken@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Reviewed-by: Laurent Dufour <ldufour@linux.ibm.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Davidlohr Bueso <dbueso@suse.de>
Cc: David Rientjes <rientjes@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Liam Howlett <Liam.Howlett@oracle.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ying Han <yinghan@google.com>
Link: http://lkml.kernel.org/r/20200520052908.204642-6-walken@google.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/arm64/kvm/mmu.c
arch/mips/mm/fault.c
arch/riscv/mm/pageattr.c
arch/x86/kvm/mmu/paging_tmpl.h
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
drivers/vfio/pci/vfio_pci.c
fs/proc/base.c
lib/test_hmm.c

diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 290154e32c0bce360dfe4324ec0080c594b4144e..8c0035cab6b629fa5732fe2e872256da1dd279f8 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1084,7 +1084,7 @@ void stage2_unmap_vm(struct kvm *kvm)
        int idx;
 
        idx = srcu_read_lock(&kvm->srcu);
-       down_read(&current->mm->mmap_sem);
+       mmap_read_lock(current->mm);
        spin_lock(&kvm->mmu_lock);
 
        slots = kvm_memslots(kvm);
@@ -1092,7 +1092,7 @@ void stage2_unmap_vm(struct kvm *kvm)
                stage2_unmap_memslot(kvm, memslot);
 
        spin_unlock(&kvm->mmu_lock);
-       up_read(&current->mm->mmap_sem);
+       mmap_read_unlock(current->mm);
        srcu_read_unlock(&kvm->srcu, idx);
 }
 
@@ -1848,11 +1848,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        }
 
        /* Let's check if we will get back a huge page backed by hugetlbfs */
-       down_read(&current->mm->mmap_sem);
+       mmap_read_lock(current->mm);
        vma = find_vma_intersection(current->mm, hva, hva + 1);
        if (unlikely(!vma)) {
                kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
-               up_read(&current->mm->mmap_sem);
+               mmap_read_unlock(current->mm);
                return -EFAULT;
        }
 
@@ -1879,7 +1879,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        if (vma_pagesize == PMD_SIZE ||
            (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm)))
                gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
-       up_read(&current->mm->mmap_sem);
+       mmap_read_unlock(current->mm);
 
        /* We need minimum second+third level pages */
        ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm),
@@ -2456,7 +2456,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
            (kvm_phys_size(kvm) >> PAGE_SHIFT))
                return -EFAULT;
 
-       down_read(&current->mm->mmap_sem);
+       mmap_read_lock(current->mm);
        /*
         * A memory region could potentially cover multiple VMAs, and any holes
         * between them, so iterate over all of them to find out if we can map
@@ -2515,7 +2515,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
                stage2_flush_memslot(kvm, memslot);
        spin_unlock(&kvm->mmu_lock);
 out:
-       up_read(&current->mm->mmap_sem);
+       mmap_read_unlock(current->mm);
        return ret;
 }
 
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index f8d62cd83b36ce3d3729388789af30937dfb5c3c..9ef2dd39111ed1fa888b86806ae10e892c215b54 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -97,7 +97,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
 retry:
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
@@ -190,7 +190,7 @@ good_area:
                }
        }
 
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        return;
 
 /*
@@ -198,7 +198,7 @@ good_area:
  * Fix it, but check if it's kernel or user first..
  */
 bad_area:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
 bad_area_nosemaphore:
        /* User mode accesses just cause a SIGSEGV */
@@ -250,14 +250,14 @@ out_of_memory:
         * We ran out of memory, call the OOM killer, and return the userspace
         * (which will retry the fault, or kill us if we got oom-killed).
         */
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        if (!user_mode(regs))
                goto no_context;
        pagefault_out_of_memory();
        return;
 
 do_sigbus:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
 
        /* Kernel mode? Handle exceptions or die */
        if (!user_mode(regs))
diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
index 51165f73ce7d30840350e22fa75dcaad9844b2ea..ec2c70f84994052d13303e3f6901d9eff454c51e 100644
--- a/arch/riscv/mm/pageattr.c
+++ b/arch/riscv/mm/pageattr.c
@@ -117,10 +117,10 @@ static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
        if (!numpages)
                return 0;
 
-       down_read(&init_mm.mmap_sem);
+       mmap_read_lock(&init_mm);
        ret =  walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
                                     &masks);
-       up_read(&init_mm.mmap_sem);
+       mmap_read_unlock(&init_mm);
 
        flush_tlb_kernel_range(start, end);
 
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 38c576495048ba0bc44f353afd78974ff36f44bd..a6d484ea110b1d67ee958309f7b774764796fd9f 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -165,22 +165,22 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                unsigned long pfn;
                unsigned long paddr;
 
-               down_read(&current->mm->mmap_sem);
+               mmap_read_lock(current->mm);
                vma = find_vma_intersection(current->mm, vaddr, vaddr + PAGE_SIZE);
                if (!vma || !(vma->vm_flags & VM_PFNMAP)) {
-                       up_read(&current->mm->mmap_sem);
+                       mmap_read_unlock(current->mm);
                        return -EFAULT;
                }
                pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
                paddr = pfn << PAGE_SHIFT;
                table = memremap(paddr, PAGE_SIZE, MEMREMAP_WB);
                if (!table) {
-                       up_read(&current->mm->mmap_sem);
+                       mmap_read_unlock(current->mm);
                        return -EFAULT;
                }
                ret = CMPXCHG(&table[index], orig_pte, new_pte);
                memunmap(table);
-               up_read(&current->mm->mmap_sem);
+               mmap_read_unlock(current->mm);
        }
 
        return (ret != orig_pte);
diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
index 492b76c29490bde955ea71d42cb292b3cbd825e8..c9e7df0ea5a6b3989b4b61d1bc29c46da93ba645 100644
--- a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
+++ b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
@@ -982,9 +982,9 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
        }
 
        mutex_unlock(&bo->mutex);
-       down_read(&current->mm->mmap_sem);
+       mmap_read_lock(current->mm);
        vma = find_vma(current->mm, (unsigned long)userptr);
-       up_read(&current->mm->mmap_sem);
+       mmap_read_unlock(current->mm);
        if (!vma) {
                dev_err(atomisp_dev, "find_vma failed\n");
                kfree(bo->page_obj);
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 3fc198f3eeb5c16cbcc4980550109714233023bc..b5f6ef2d12f64a1dc9e447cf9afbda73c6161252 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -1422,17 +1422,17 @@ static int vfio_pci_zap_and_vma_lock(struct vfio_pci_device *vdev, bool try)
                mutex_unlock(&vdev->vma_lock);
 
                if (try) {
-                       if (!down_read_trylock(&mm->mmap_sem)) {
+                       if (!mmap_read_trylock(mm)) {
                                mmput(mm);
                                return 0;
                        }
                } else {
-                       down_read(&mm->mmap_sem);
+                       mmap_read_lock(mm);
                }
                if (mmget_still_valid(mm)) {
                        if (try) {
                                if (!mutex_trylock(&vdev->vma_lock)) {
-                                       up_read(&mm->mmap_sem);
+                                       mmap_read_unlock(mm);
                                        mmput(mm);
                                        return 0;
                                }
@@ -1454,7 +1454,7 @@ static int vfio_pci_zap_and_vma_lock(struct vfio_pci_device *vdev, bool try)
                        }
                        mutex_unlock(&vdev->vma_lock);
                }
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
                mmput(mm);
        }
 }
diff --git a/fs/proc/base.c b/fs/proc/base.c
index f9c88e4bd8372e74815c58979447d2530c7b81ed..4f0d6f40b8f10c4ae53b0d5f6c1a90604b7dd507 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2322,7 +2322,7 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
        if (!mm)
                goto out_put_task;
 
-       ret = down_read_killable(&mm->mmap_sem);
+       ret = mmap_read_lock_killable(mm);
        if (ret) {
                mmput(mm);
                goto out_put_task;
@@ -2349,7 +2349,7 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
                p = genradix_ptr_alloc(&fa, nr_files++, GFP_KERNEL);
                if (!p) {
                        ret = -ENOMEM;
-                       up_read(&mm->mmap_sem);
+                       mmap_read_unlock(mm);
                        mmput(mm);
                        goto out_put_task;
                }
@@ -2358,7 +2358,7 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
                p->end = vma->vm_end;
                p->mode = vma->vm_file->f_mode;
        }
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        mmput(mm);
 
        for (i = 0; i < nr_files; i++) {
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index 5c1858e325bab9aee92e70837890027289330867..28528285942c9899e36ef20b3394d87d09430f66 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -245,9 +245,9 @@ static int dmirror_range_fault(struct dmirror *dmirror,
                }
 
                range->notifier_seq = mmu_interval_read_begin(range->notifier);
-               down_read(&mm->mmap_sem);
+               mmap_read_lock(mm);
                ret = hmm_range_fault(range);
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
                if (ret) {
                        if (ret == -EBUSY)
                                continue;
@@ -686,7 +686,7 @@ static int dmirror_migrate(struct dmirror *dmirror,
        if (!mmget_not_zero(mm))
                return -EINVAL;
 
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        for (addr = start; addr < end; addr = next) {
                vma = find_vma(mm, addr);
                if (!vma || addr < vma->vm_start ||
@@ -713,7 +713,7 @@ static int dmirror_migrate(struct dmirror *dmirror,
                dmirror_migrate_finalize_and_map(&args, dmirror);
                migrate_vma_finalize(&args);
        }
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        mmput(mm);
 
        /* Return the migrated data for verification. */
@@ -733,7 +733,7 @@ static int dmirror_migrate(struct dmirror *dmirror,
        return ret;
 
 out:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        mmput(mm);
        return ret;
 }
@@ -825,9 +825,9 @@ static int dmirror_range_snapshot(struct dmirror *dmirror,
 
                range->notifier_seq = mmu_interval_read_begin(range->notifier);
 
-               down_read(&mm->mmap_sem);
+               mmap_read_lock(mm);
                ret = hmm_range_fault(range);
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
                if (ret) {
                        if (ret == -EBUSY)
                                continue;