mm: Convert i_mmap_lock to a mutex
author     Peter Zijlstra <a.p.zijlstra@chello.nl>
           Wed, 25 May 2011 00:12:06 +0000 (17:12 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Wed, 25 May 2011 15:39:18 +0000 (08:39 -0700)
Straightforward conversion of i_mmap_lock to a mutex.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Miller <davem@davemloft.net>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Tony Luck <tony.luck@intel.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
17 files changed:
Documentation/lockstat.txt
Documentation/vm/locking
arch/x86/mm/hugetlbpage.c
fs/hugetlbfs/inode.c
fs/inode.c
include/linux/fs.h
include/linux/mmu_notifier.h
kernel/fork.c
mm/filemap.c
mm/filemap_xip.c
mm/fremap.c
mm/hugetlb.c
mm/memory-failure.c
mm/memory.c
mm/mmap.c
mm/mremap.c
mm/rmap.c
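
The change itself is mechanical: the spinlock field becomes a mutex, its
initializer becomes mutex_init(), and every spin_lock()/spin_unlock() on it
becomes mutex_lock()/mutex_unlock(). A minimal sketch of that pattern
(hypothetical demo_mapping type, not part of the patch, using only the APIs
the patch switches to):

#include <linux/mutex.h>

struct demo_mapping {
	struct mutex i_mmap_mutex;	/* was: spinlock_t i_mmap_lock */
};

static void demo_init(struct demo_mapping *m)
{
	mutex_init(&m->i_mmap_mutex);	/* was: spin_lock_init(&m->i_mmap_lock) */
}

static void demo_walk(struct demo_mapping *m)
{
	mutex_lock(&m->i_mmap_mutex);	/* was: spin_lock(); holder may now sleep */
	/* ... walk the i_mmap prio tree / nonlinear list ... */
	mutex_unlock(&m->i_mmap_mutex);	/* was: spin_unlock() */
}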

index 65f4c795015dead0cfde0f60381b7efcec67da9b..9c0a80d17a23b9bdf1bb9ab9672e5360be7e14b5 100644 (file)
--- a/Documentation/lockstat.txt
+++ b/Documentation/lockstat.txt
@@ -136,7 +136,7 @@ View the top contending locks:
                              dcache_lock:          1037           1161           0.38          45.32         774.51           6611         243371           0.15         306.48       77387.24
                          &inode->i_mutex:           161            286 18446744073709       62882.54     1244614.55           3653          20598 18446744073709       62318.60     1693822.74
                          &zone->lru_lock:            94             94           0.53           7.33          92.10           4366          32690           0.29          59.81       16350.06
-              &inode->i_data.i_mmap_lock:            79             79           0.40           3.77          53.03          11779          87755           0.28         116.93       29898.44
+              &inode->i_data.i_mmap_mutex:            79             79           0.40           3.77          53.03          11779          87755           0.28         116.93       29898.44
                         &q->__queue_lock:            48             50           0.52          31.62          86.31            774          13131           0.17         113.08       12277.52
                         &rq->rq_lock_key:            43             47           0.74          68.50         170.63           3706          33929           0.22         107.99       17460.62
                       &rq->rq_lock_key#2:            39             46           0.75           6.68          49.03           2979          32292           0.17         125.17       17137.63
index 25fadb448760008dc6408ed0cca2a72c66b2efd0..f61228bd639577195b4e5142d1388e33136b7ace 100644 (file)
--- a/Documentation/vm/locking
+++ b/Documentation/vm/locking
@@ -66,7 +66,7 @@ in some cases it is not really needed. Eg, vm_start is modified by
 expand_stack(), it is hard to come up with a destructive scenario without 
 having the vmlist protection in this case.
 
-The page_table_lock nests with the inode i_mmap_lock and the kmem cache
+The page_table_lock nests with the inode i_mmap_mutex and the kmem cache
 c_spinlock spinlocks.  This is okay, since the kmem code asks for pages after
 dropping c_spinlock.  The page_table_lock also nests with pagecache_lock and
 pagemap_lru_lock spinlocks, and no code asks for memory with these locks
index d4203988504ae871e9e6bf4f8a294d0e8b696dff..f581a18c0d4d7d07aac5cdfa8fb443106e3d5e92 100644 (file)
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -72,7 +72,7 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
        if (!vma_shareable(vma, addr))
                return;
 
-       spin_lock(&mapping->i_mmap_lock);
+       mutex_lock(&mapping->i_mmap_mutex);
        vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) {
                if (svma == vma)
                        continue;
@@ -97,7 +97,7 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
                put_page(virt_to_page(spte));
        spin_unlock(&mm->page_table_lock);
 out:
-       spin_unlock(&mapping->i_mmap_lock);
+       mutex_unlock(&mapping->i_mmap_mutex);
 }
 
 /*
index b9eeb1cd03ff540dc87aa31f43dc9eb03fbafb61..e7a035781b7dd30694acd0b3ee104758a3ffcf87 100644 (file)
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -412,10 +412,10 @@ static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
        pgoff = offset >> PAGE_SHIFT;
 
        i_size_write(inode, offset);
-       spin_lock(&mapping->i_mmap_lock);
+       mutex_lock(&mapping->i_mmap_mutex);
        if (!prio_tree_empty(&mapping->i_mmap))
                hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff);
-       spin_unlock(&mapping->i_mmap_lock);
+       mutex_unlock(&mapping->i_mmap_mutex);
        truncate_hugepages(inode, offset);
        return 0;
 }
index 7a7284c71abd7b943c9f1c15f7ea8562f838f871..88aa3bcc768194499fbbc0998df7eff68ad76f75 100644 (file)
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -326,7 +326,7 @@ void address_space_init_once(struct address_space *mapping)
        memset(mapping, 0, sizeof(*mapping));
        INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
        spin_lock_init(&mapping->tree_lock);
-       spin_lock_init(&mapping->i_mmap_lock);
+       mutex_init(&mapping->i_mmap_mutex);
        INIT_LIST_HEAD(&mapping->private_list);
        spin_lock_init(&mapping->private_lock);
        INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
index 5d2c86bdf5ba655a59ac4ec0ae567990c3912701..5bb9e826019b2fa27bf64a493bbba6f638381a32 100644 (file)
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -634,7 +634,7 @@ struct address_space {
        unsigned int            i_mmap_writable;/* count VM_SHARED mappings */
        struct prio_tree_root   i_mmap;         /* tree of private and shared mappings */
        struct list_head        i_mmap_nonlinear;/*list VM_NONLINEAR mappings */
-       spinlock_t              i_mmap_lock;    /* protect tree, count, list */
+       struct mutex            i_mmap_mutex;   /* protect tree, count, list */
        unsigned long           nrpages;        /* number of total pages */
        pgoff_t                 writeback_index;/* writeback starts here */
        const struct address_space_operations *a_ops;   /* methods */
index cc2e7dfea9d7f6a57661155cb88ca487863e66af..a877dfc243eb5b55f2613636496258fdc42b187a 100644 (file)
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -150,7 +150,7 @@ struct mmu_notifier_ops {
  * Therefore notifier chains can only be traversed when either
  *
  * 1. mmap_sem is held.
- * 2. One of the reverse map locks is held (i_mmap_lock or anon_vma->lock).
+ * 2. One of the reverse map locks is held (i_mmap_mutex or anon_vma->lock).
  * 3. No other concurrent thread can access the list (release)
  */
 struct mmu_notifier {
index 4eef925477fc2e1cd3334ff06c180951896abc2f..927692734bcfc04d8b4b221d957b4bf7396f164f 100644 (file)
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -383,14 +383,14 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
                        get_file(file);
                        if (tmp->vm_flags & VM_DENYWRITE)
                                atomic_dec(&inode->i_writecount);
-                       spin_lock(&mapping->i_mmap_lock);
+                       mutex_lock(&mapping->i_mmap_mutex);
                        if (tmp->vm_flags & VM_SHARED)
                                mapping->i_mmap_writable++;
                        flush_dcache_mmap_lock(mapping);
                        /* insert tmp into the share list, just after mpnt */
                        vma_prio_tree_add(tmp, mpnt);
                        flush_dcache_mmap_unlock(mapping);
-                       spin_unlock(&mapping->i_mmap_lock);
+                       mutex_unlock(&mapping->i_mmap_mutex);
                }
 
                /*
index 8144f87dcbb4a5b410d521e51d7e86ed1c7d86c1..88354ae0b1fd7275adc91cbd19aad9bceb2aa140 100644 (file)
--- a/mm/filemap.c
+++ b/mm/filemap.c
 /*
  * Lock ordering:
  *
- *  ->i_mmap_lock              (truncate_pagecache)
+ *  ->i_mmap_mutex             (truncate_pagecache)
  *    ->private_lock           (__free_pte->__set_page_dirty_buffers)
  *      ->swap_lock            (exclusive_swap_page, others)
  *        ->mapping->tree_lock
  *
  *  ->i_mutex
- *    ->i_mmap_lock            (truncate->unmap_mapping_range)
+ *    ->i_mmap_mutex           (truncate->unmap_mapping_range)
  *
  *  ->mmap_sem
- *    ->i_mmap_lock
+ *    ->i_mmap_mutex
  *      ->page_table_lock or pte_lock  (various, mainly in memory.c)
  *        ->mapping->tree_lock (arch-dependent flush_dcache_mmap_lock)
  *
@@ -84,7 +84,7 @@
  *    sb_lock                  (fs/fs-writeback.c)
  *    ->mapping->tree_lock     (__sync_single_inode)
  *
- *  ->i_mmap_lock
+ *  ->i_mmap_mutex
  *    ->anon_vma.lock          (vma_adjust)
  *
  *  ->anon_vma.lock
  *
  *  (code doesn't rely on that order, so you could switch it around)
  *  ->tasklist_lock             (memory_failure, collect_procs_ao)
- *    ->i_mmap_lock
+ *    ->i_mmap_mutex
  */
 
 /*
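
The lock-ordering comment above is the invariant the rest of the patch
preserves: mmap_sem is taken outermost, then i_mmap_mutex, then
page_table_lock or the pte lock. A compressed sketch of that nesting
(hypothetical helper with the locks handed in; the real call chains are
spread across mm/):

#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/spinlock.h>

static void demo_nesting(struct rw_semaphore *mmap_sem,
			 struct mutex *i_mmap_mutex,
			 spinlock_t *page_table_lock)
{
	down_read(mmap_sem);		/* outermost: mmap_sem */
	mutex_lock(i_mmap_mutex);	/* then the file rmap mutex */
	spin_lock(page_table_lock);	/* innermost: page_table_lock / pte lock */

	/* ... manipulate page tables ... */

	spin_unlock(page_table_lock);
	mutex_unlock(i_mmap_mutex);
	up_read(mmap_sem);
}
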
index 83364df74a33811ea7aea971412bfe91d4fe9c17..93356cd12828a40eb4d635a9e0e0bea3d6ba0790 100644 (file)
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -183,7 +183,7 @@ __xip_unmap (struct address_space * mapping,
                return;
 
 retry:
-       spin_lock(&mapping->i_mmap_lock);
+       mutex_lock(&mapping->i_mmap_mutex);
        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                mm = vma->vm_mm;
                address = vma->vm_start +
@@ -201,7 +201,7 @@ retry:
                        page_cache_release(page);
                }
        }
-       spin_unlock(&mapping->i_mmap_lock);
+       mutex_unlock(&mapping->i_mmap_mutex);
 
        if (locked) {
                mutex_unlock(&xip_sparse_mutex);
index ec520c7b28dffedb5027de6b27834831938671cd..7f4123056e0603ce3bae84c3fea05a7444160dc8 100644 (file)
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -211,13 +211,13 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
                        }
                        goto out;
                }
-               spin_lock(&mapping->i_mmap_lock);
+               mutex_lock(&mapping->i_mmap_mutex);
                flush_dcache_mmap_lock(mapping);
                vma->vm_flags |= VM_NONLINEAR;
                vma_prio_tree_remove(vma, &mapping->i_mmap);
                vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
                flush_dcache_mmap_unlock(mapping);
-               spin_unlock(&mapping->i_mmap_lock);
+               mutex_unlock(&mapping->i_mmap_mutex);
        }
 
        if (vma->vm_flags & VM_LOCKED) {
index bbb4a5bbb9582055945237692b8938a731c5492a..5fd68b95c671bafd2687ab696e04a0ae73a17012 100644 (file)
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2205,7 +2205,7 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
        unsigned long sz = huge_page_size(h);
 
        /*
-        * A page gathering list, protected by per file i_mmap_lock. The
+        * A page gathering list, protected by per file i_mmap_mutex. The
         * lock is used to avoid list corruption from multiple unmapping
         * of the same page since we are using page->lru.
         */
@@ -2274,9 +2274,9 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                          unsigned long end, struct page *ref_page)
 {
-       spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
+       mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
        __unmap_hugepage_range(vma, start, end, ref_page);
-       spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
+       mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
 }
 
 /*
@@ -2308,7 +2308,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
         * this mapping should be shared between all the VMAs,
         * __unmap_hugepage_range() is called as the lock is already held
         */
-       spin_lock(&mapping->i_mmap_lock);
+       mutex_lock(&mapping->i_mmap_mutex);
        vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                /* Do not unmap the current VMA */
                if (iter_vma == vma)
@@ -2326,7 +2326,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
                                address, address + huge_page_size(h),
                                page);
        }
-       spin_unlock(&mapping->i_mmap_lock);
+       mutex_unlock(&mapping->i_mmap_mutex);
 
        return 1;
 }
@@ -2810,7 +2810,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
        BUG_ON(address >= end);
        flush_cache_range(vma, address, end);
 
-       spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
+       mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
        spin_lock(&mm->page_table_lock);
        for (; address < end; address += huge_page_size(h)) {
                ptep = huge_pte_offset(mm, address);
@@ -2825,7 +2825,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
                }
        }
        spin_unlock(&mm->page_table_lock);
-       spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
+       mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
 
        flush_tlb_range(vma, start, end);
 }
index 2b9a5eef39e0661d48e7a9976780345587aff80c..12178ec32ab559f1a6090863cab5f5aeefd848dc 100644 (file)
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -429,7 +429,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
         */
 
        read_lock(&tasklist_lock);
-       spin_lock(&mapping->i_mmap_lock);
+       mutex_lock(&mapping->i_mmap_mutex);
        for_each_process(tsk) {
                pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
 
@@ -449,7 +449,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
                                add_to_kill(tsk, page, vma, to_kill, tkc);
                }
        }
-       spin_unlock(&mapping->i_mmap_lock);
+       mutex_unlock(&mapping->i_mmap_mutex);
        read_unlock(&tasklist_lock);
 }
 
index 18655878b9f8abd1b49abf6ebc38d2a4f04e0ecf..7bbe4d3df756e30966eed1c1f6333db502b27a0e 100644 (file)
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2667,12 +2667,12 @@ void unmap_mapping_range(struct address_space *mapping,
                details.last_index = ULONG_MAX;
 
 
-       spin_lock(&mapping->i_mmap_lock);
+       mutex_lock(&mapping->i_mmap_mutex);
        if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
                unmap_mapping_range_tree(&mapping->i_mmap, &details);
        if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
                unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
-       spin_unlock(&mapping->i_mmap_lock);
+       mutex_unlock(&mapping->i_mmap_mutex);
 }
 EXPORT_SYMBOL(unmap_mapping_range);
 
index 50cb04bb56bfd806f54f8baa01c9cfc5c84c94a0..26efbfca0b2083257616d722941f57dd8b0015c7 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -194,7 +194,7 @@ error:
 }
 
 /*
- * Requires inode->i_mapping->i_mmap_lock
+ * Requires inode->i_mapping->i_mmap_mutex
  */
 static void __remove_shared_vm_struct(struct vm_area_struct *vma,
                struct file *file, struct address_space *mapping)
@@ -222,9 +222,9 @@ void unlink_file_vma(struct vm_area_struct *vma)
 
        if (file) {
                struct address_space *mapping = file->f_mapping;
-               spin_lock(&mapping->i_mmap_lock);
+               mutex_lock(&mapping->i_mmap_mutex);
                __remove_shared_vm_struct(vma, file, mapping);
-               spin_unlock(&mapping->i_mmap_lock);
+               mutex_unlock(&mapping->i_mmap_mutex);
        }
 }
 
@@ -446,13 +446,13 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
                mapping = vma->vm_file->f_mapping;
 
        if (mapping)
-               spin_lock(&mapping->i_mmap_lock);
+               mutex_lock(&mapping->i_mmap_mutex);
 
        __vma_link(mm, vma, prev, rb_link, rb_parent);
        __vma_link_file(vma);
 
        if (mapping)
-               spin_unlock(&mapping->i_mmap_lock);
+               mutex_unlock(&mapping->i_mmap_mutex);
 
        mm->map_count++;
        validate_mm(mm);
@@ -555,7 +555,7 @@ again:                      remove_next = 1 + (end > next->vm_end);
                mapping = file->f_mapping;
                if (!(vma->vm_flags & VM_NONLINEAR))
                        root = &mapping->i_mmap;
-               spin_lock(&mapping->i_mmap_lock);
+               mutex_lock(&mapping->i_mmap_mutex);
                if (insert) {
                        /*
                         * Put into prio_tree now, so instantiated pages
@@ -622,7 +622,7 @@ again:                      remove_next = 1 + (end > next->vm_end);
        if (anon_vma)
                anon_vma_unlock(anon_vma);
        if (mapping)
-               spin_unlock(&mapping->i_mmap_lock);
+               mutex_unlock(&mapping->i_mmap_mutex);
 
        if (remove_next) {
                if (file) {
@@ -2290,7 +2290,7 @@ void exit_mmap(struct mm_struct *mm)
 
 /* Insert vm structure into process list sorted by address
  * and into the inode's i_mmap tree.  If vm_file is non-NULL
- * then i_mmap_lock is taken here.
+ * then i_mmap_mutex is taken here.
  */
 int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
 {
@@ -2532,7 +2532,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
                 */
                if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
                        BUG();
-               spin_lock_nest_lock(&mapping->i_mmap_lock, &mm->mmap_sem);
+               mutex_lock_nest_lock(&mapping->i_mmap_mutex, &mm->mmap_sem);
        }
 }
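
vm_lock_mapping() needs more than the mechanical substitution:
spin_lock_nest_lock() becomes mutex_lock_nest_lock(), keeping the lockdep
annotation that the many i_mmap_mutex instances taken back-to-back by
mm_take_all_locks() are all nested under mmap_sem rather than being recursive
acquisitions of one lock class. A rough sketch of the idiom (hypothetical
demo types, not the kernel's mm_take_all_locks()):

#include <linux/mutex.h>
#include <linux/rwsem.h>

struct demo_mapping {
	struct mutex i_mmap_mutex;
};

static void demo_lock_all(struct rw_semaphore *mmap_sem,
			  struct demo_mapping **maps, int n)
{
	int i;

	down_write(mmap_sem);
	for (i = 0; i < n; i++)
		/* annotate: nested under mmap_sem, not recursive locking */
		mutex_lock_nest_lock(&maps[i]->i_mmap_mutex, mmap_sem);

	/* ... every mapping is now stable ... */

	for (i = n - 1; i >= 0; i--)
		mutex_unlock(&maps[i]->i_mmap_mutex);
	up_write(mmap_sem);
}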
 
@@ -2559,7 +2559,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
  * vma in this mm is backed by the same anon_vma or address_space.
  *
  * We can take all the locks in random order because the VM code
- * taking i_mmap_lock or anon_vma->lock outside the mmap_sem never
+ * taking i_mmap_mutex or anon_vma->lock outside the mmap_sem never
  * takes more than one of them in a row. Secondly we're protected
  * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex.
  *
@@ -2631,7 +2631,7 @@ static void vm_unlock_mapping(struct address_space *mapping)
                 * AS_MM_ALL_LOCKS can't change to 0 from under us
                 * because we hold the mm_all_locks_mutex.
                 */
-               spin_unlock(&mapping->i_mmap_lock);
+               mutex_unlock(&mapping->i_mmap_mutex);
                if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
                                        &mapping->flags))
                        BUG();
index 909e1e1e99b1fbe5606888225af1be4acffeaf7a..506fa44403df5cc3215cb69ec2d1098a96bb7919 100644 (file)
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -93,7 +93,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
                 * and we propagate stale pages into the dst afterward.
                 */
                mapping = vma->vm_file->f_mapping;
-               spin_lock(&mapping->i_mmap_lock);
+               mutex_lock(&mapping->i_mmap_mutex);
        }
 
        /*
@@ -122,7 +122,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
        pte_unmap(new_pte - 1);
        pte_unmap_unlock(old_pte - 1, old_ptl);
        if (mapping)
-               spin_unlock(&mapping->i_mmap_lock);
+               mutex_unlock(&mapping->i_mmap_mutex);
        mmu_notifier_invalidate_range_end(vma->vm_mm, old_start, old_end);
 }
 
index 522e4a93cadda8d33bcc25025c224dc7acacc9d6..f0ef7ea5423a530e4df6a45efe5c78eb8f309788 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -24,7 +24,7 @@
  *   inode->i_alloc_sem (vmtruncate_range)
  *   mm->mmap_sem
  *     page->flags PG_locked (lock_page)
- *       mapping->i_mmap_lock
+ *       mapping->i_mmap_mutex
  *         anon_vma->lock
  *           mm->page_table_lock or pte_lock
  *             zone->lru_lock (in mark_page_accessed, isolate_lru_page)
@@ -646,14 +646,14 @@ static int page_referenced_file(struct page *page,
         * The page lock not only makes sure that page->mapping cannot
         * suddenly be NULLified by truncation, it makes sure that the
         * structure at mapping cannot be freed and reused yet,
-        * so we can safely take mapping->i_mmap_lock.
+        * so we can safely take mapping->i_mmap_mutex.
         */
        BUG_ON(!PageLocked(page));
 
-       spin_lock(&mapping->i_mmap_lock);
+       mutex_lock(&mapping->i_mmap_mutex);
 
        /*
-        * i_mmap_lock does not stabilize mapcount at all, but mapcount
+        * i_mmap_mutex does not stabilize mapcount at all, but mapcount
         * is more likely to be accurate if we note it after spinning.
         */
        mapcount = page_mapcount(page);
@@ -675,7 +675,7 @@ static int page_referenced_file(struct page *page,
                        break;
        }
 
-       spin_unlock(&mapping->i_mmap_lock);
+       mutex_unlock(&mapping->i_mmap_mutex);
        return referenced;
 }
 
@@ -762,7 +762,7 @@ static int page_mkclean_file(struct address_space *mapping, struct page *page)
 
        BUG_ON(PageAnon(page));
 
-       spin_lock(&mapping->i_mmap_lock);
+       mutex_lock(&mapping->i_mmap_mutex);
        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                if (vma->vm_flags & VM_SHARED) {
                        unsigned long address = vma_address(page, vma);
@@ -771,7 +771,7 @@ static int page_mkclean_file(struct address_space *mapping, struct page *page)
                        ret += page_mkclean_one(page, vma, address);
                }
        }
-       spin_unlock(&mapping->i_mmap_lock);
+       mutex_unlock(&mapping->i_mmap_mutex);
        return ret;
 }
 
@@ -1119,7 +1119,7 @@ out_mlock:
        /*
         * We need mmap_sem locking, Otherwise VM_LOCKED check makes
         * unstable result and race. Plus, We can't wait here because
-        * we now hold anon_vma->lock or mapping->i_mmap_lock.
+        * we now hold anon_vma->lock or mapping->i_mmap_mutex.
         * if trylock failed, the page remain in evictable lru and later
         * vmscan could retry to move the page to unevictable lru if the
         * page is actually mlocked.
@@ -1345,7 +1345,7 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
        unsigned long max_nl_size = 0;
        unsigned int mapcount;
 
-       spin_lock(&mapping->i_mmap_lock);
+       mutex_lock(&mapping->i_mmap_mutex);
        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                unsigned long address = vma_address(page, vma);
                if (address == -EFAULT)
@@ -1391,7 +1391,7 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
        mapcount = page_mapcount(page);
        if (!mapcount)
                goto out;
-       cond_resched_lock(&mapping->i_mmap_lock);
+       cond_resched();
 
        max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
        if (max_nl_cursor == 0)
@@ -1413,7 +1413,7 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
                        }
                        vma->vm_private_data = (void *) max_nl_cursor;
                }
-               cond_resched_lock(&mapping->i_mmap_lock);
+               cond_resched();
                max_nl_cursor += CLUSTER_SIZE;
        } while (max_nl_cursor <= max_nl_size);
 
@@ -1425,7 +1425,7 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
        list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
                vma->vm_private_data = NULL;
 out:
-       spin_unlock(&mapping->i_mmap_lock);
+       mutex_unlock(&mapping->i_mmap_mutex);
        return ret;
 }
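
The cond_resched_lock() calls above also change behaviour slightly: with a
spinlock, the voluntary reschedule had to drop and retake i_mmap_lock; a
mutex holder is allowed to sleep, so a bare cond_resched() is enough and the
lock stays held across the whole nonlinear walk. Side-by-side sketch
(hypothetical demo locks, not from the patch):

#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_spin);
static DEFINE_MUTEX(demo_mutex);

static void long_walk_under_spinlock(void)
{
	spin_lock(&demo_spin);
	/* ... part of a long walk ... */
	cond_resched_lock(&demo_spin);	/* must drop, reschedule, re-acquire */
	/* ... rest of the walk ... */
	spin_unlock(&demo_spin);
}

static void long_walk_under_mutex(void)
{
	mutex_lock(&demo_mutex);
	/* ... part of a long walk ... */
	cond_resched();			/* sleeping with the mutex held is fine */
	/* ... rest of the walk ... */
	mutex_unlock(&demo_mutex);
}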
 
@@ -1544,7 +1544,7 @@ static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
 
        if (!mapping)
                return ret;
-       spin_lock(&mapping->i_mmap_lock);
+       mutex_lock(&mapping->i_mmap_mutex);
        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                unsigned long address = vma_address(page, vma);
                if (address == -EFAULT)
@@ -1558,7 +1558,7 @@ static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
         * never contain migration ptes.  Decide what to do about this
         * limitation to linear when we need rmap_walk() on nonlinear.
         */
-       spin_unlock(&mapping->i_mmap_lock);
+       mutex_unlock(&mapping->i_mmap_mutex);
        return ret;
 }