mm: prevent concurrent unmap_mapping_range() on the same inode
author    Miklos Szeredi <mszeredi@suse.cz>
Wed, 23 Feb 2011 12:49:47 +0000 (13:49 +0100)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 24 Feb 2011 03:52:52 +0000 (19:52 -0800)
Michael Leun reported that running parallel opens on a fuse filesystem
can trigger a "kernel BUG at mm/truncate.c:475".

Gurudas Pai reported the same bug on NFS.

The reason is that unmap_mapping_range() is not prepared for more than
one concurrent invocation per inode.  For example:

  thread1: goes through a big range, stops in the middle of a vma and
     stores the restart address in vm_truncate_count.

  thread2: comes in with a small (e.g. single-page) unmap request on
     the same vma, somewhere before the restart address, finds that the
     vma was already unmapped up to the restart address, and happily
     returns without doing anything.

Another scenario would be two big unmap requests, both having to
restart the unmapping and each one setting vm_truncate_count to its
own value.  This could go on forever without either of them being
able to finish.
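As a loose userspace analogue (a hedged sketch, not the kernel code:
every name below, including unmap_range and restart_addr, is invented
for illustration), the failure boils down to a restartable range
operation whose restart cursor is shared by all callers:

  /* build: gcc -pthread -o stale-cursor stale-cursor.c */
  #include <pthread.h>
  #include <stdio.h>

  #define NPAGES 16

  static int page_mapped[NPAGES]; /* 1 = "mapped"; stand-in for live ptes */
  static int restart_addr;        /* shared cursor, like vm_truncate_count */

  /* Flawed shortcut: if another caller already advanced the shared
   * cursor past our start, assume our range was handled and skip it. */
  static void unmap_range(int start, int end)
  {
          if (restart_addr > start)
                  start = restart_addr;   /* may skip still-mapped pages */
          for (int i = start; i < end; i++) {
                  page_mapped[i] = 0;
                  restart_addr = i + 1;   /* publish progress */
          }
  }

  static void *big_unmap(void *arg)   { unmap_range(0, NPAGES); return NULL; }
  static void *small_unmap(void *arg) { unmap_range(2, 3); return NULL; }

  int main(void)
  {
          pthread_t t1, t2;

          for (int i = 0; i < NPAGES; i++)
                  page_mapped[i] = 1;

          pthread_create(&t1, NULL, big_unmap, NULL);
          pthread_create(&t2, NULL, small_unmap, NULL);
          pthread_join(t1, NULL);
          pthread_join(t2, NULL);

          /* Fault one page back in, then issue a fresh small unmap: it
           * trusts the stale cursor and returns without touching it. */
          page_mapped[2] = 1;
          unmap_range(2, 3);
          printf("page 2 mapped after unmap_range(2, 3): %d\n",
                 page_mapped[2]);
          return 0;
  }

Run it and the final line prints 1: the page is still "mapped" even
though a small unmap covering it just returned successfully.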

Truncate and hole punching already serialize with i_mutex.  Other
callers of unmap_mapping_range() do not, and it's difficult to get
i_mutex protection for all callers.  In particular ->d_revalidate(),
which calls invalidate_inode_pages2_range() in fuse, may be called
with or without i_mutex.

This patch adds a new mutex to 'struct address_space' to prevent
multiple unmap_mapping_range() calls from running concurrently on the
same mapping.
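The shape of the fix, in the same hedged userspace model as above
(struct mapping and its fields only mirror the patch; this is not the
kernel code): take a per-mapping mutex for the whole restartable walk,
so the cursor is private to one caller at a time.

  #include <pthread.h>

  /* Toy stand-in for struct address_space after this patch. */
  struct mapping {
          pthread_mutex_t unmap_mutex;  /* mirrors mapping->unmap_mutex */
          int restart_addr;             /* only touched under the lock */
          int page_mapped[16];
  };

  static void unmap_range(struct mapping *m, int start, int end)
  {
          pthread_mutex_lock(&m->unmap_mutex);
          m->restart_addr = start;      /* ours alone while the lock is held */
          for (int i = start; i < end; i++) {
                  m->page_mapped[i] = 0;
                  m->restart_addr = i + 1;
          }
          pthread_mutex_unlock(&m->unmap_mutex);
  }

Note that in the diff below the new mutex is taken outside the
i_mmap_lock spinlock; the order has to be this way around, since a
mutex may sleep and a spinlock holder must not.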

[ We'll hopefully get rid of all this with the upcoming mm
  preemptibility series by Peter Zijlstra, the "mm: Remove i_mmap_mutex
  lockbreak" patch in particular.  But that is for 2.6.39 ]

Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Reported-by: Michael Leun <lkml20101129@newton.leun.net>
Reported-by: Gurudas Pai <gurudas.pai@oracle.com>
Tested-by: Gurudas Pai <gurudas.pai@oracle.com>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: stable@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
fs/gfs2/main.c
fs/inode.c
fs/nilfs2/btnode.c
fs/nilfs2/btnode.h
fs/nilfs2/mdt.c
fs/nilfs2/page.c
fs/nilfs2/page.h
fs/nilfs2/super.c
include/linux/fs.h
mm/memory.c

diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 85ba027d1c4d5a9209d2cb22b7829ea7b08bb3cd..72c31a315d9658e1a03ca830ebbf7a14d06d08f4 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -59,14 +59,7 @@ static void gfs2_init_gl_aspace_once(void *foo)
        struct address_space *mapping = (struct address_space *)(gl + 1);
 
        gfs2_init_glock_once(gl);
-       memset(mapping, 0, sizeof(*mapping));
-       INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
-       spin_lock_init(&mapping->tree_lock);
-       spin_lock_init(&mapping->i_mmap_lock);
-       INIT_LIST_HEAD(&mapping->private_list);
-       spin_lock_init(&mapping->private_lock);
-       INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
-       INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
+       address_space_init_once(mapping);
 }
 
 /**
diff --git a/fs/inode.c b/fs/inode.c
index da85e56378f3e1d98c241901171e24e83882f189..9c2b795ccc938e3db2d75378bae1ec4fc4e198ff 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -295,6 +295,20 @@ static void destroy_inode(struct inode *inode)
                call_rcu(&inode->i_rcu, i_callback);
 }
 
+void address_space_init_once(struct address_space *mapping)
+{
+       memset(mapping, 0, sizeof(*mapping));
+       INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
+       spin_lock_init(&mapping->tree_lock);
+       spin_lock_init(&mapping->i_mmap_lock);
+       INIT_LIST_HEAD(&mapping->private_list);
+       spin_lock_init(&mapping->private_lock);
+       INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
+       INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
+       mutex_init(&mapping->unmap_mutex);
+}
+EXPORT_SYMBOL(address_space_init_once);
+
 /*
  * These are initializations that only need to be done
  * once, because the fields are idempotent across use
@@ -308,13 +322,7 @@ void inode_init_once(struct inode *inode)
        INIT_LIST_HEAD(&inode->i_devices);
        INIT_LIST_HEAD(&inode->i_wb_list);
        INIT_LIST_HEAD(&inode->i_lru);
-       INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
-       spin_lock_init(&inode->i_data.tree_lock);
-       spin_lock_init(&inode->i_data.i_mmap_lock);
-       INIT_LIST_HEAD(&inode->i_data.private_list);
-       spin_lock_init(&inode->i_data.private_lock);
-       INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap);
-       INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear);
+       address_space_init_once(&inode->i_data);
        i_size_ordered_init(inode);
 #ifdef CONFIG_FSNOTIFY
        INIT_HLIST_HEAD(&inode->i_fsnotify_marks);
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index 388e9e8f5286f737970ccefa36e919556235e4bf..85f7baa15f5dd8fa1eac905b8c8840c73b0294fa 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
 #include "btnode.h"
 
 
-void nilfs_btnode_cache_init_once(struct address_space *btnc)
-{
-       nilfs_mapping_init_once(btnc);
-}
-
 static const struct address_space_operations def_btnode_aops = {
        .sync_page              = block_sync_page,
 };
diff --git a/fs/nilfs2/btnode.h b/fs/nilfs2/btnode.h
index 79037494f1e0408c42c41e3147204e480e765b4e..1b8ebd888c2844348a128a37e0781c1b82915aea 100644
--- a/fs/nilfs2/btnode.h
+++ b/fs/nilfs2/btnode.h
@@ -37,7 +37,6 @@ struct nilfs_btnode_chkey_ctxt {
        struct buffer_head *newbh;
 };
 
-void nilfs_btnode_cache_init_once(struct address_space *);
 void nilfs_btnode_cache_init(struct address_space *, struct backing_dev_info *);
 void nilfs_btnode_cache_clear(struct address_space *);
 struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc,
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index 6a0e2a189f60650b3399f4b9d3732ac6037f791f..a0babd2bff6a2e03a924e45110ff1a8698f59fd6 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -454,9 +454,9 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode,
        struct backing_dev_info *bdi = inode->i_sb->s_bdi;
 
        INIT_LIST_HEAD(&shadow->frozen_buffers);
-       nilfs_mapping_init_once(&shadow->frozen_data);
+       address_space_init_once(&shadow->frozen_data);
        nilfs_mapping_init(&shadow->frozen_data, bdi, &shadow_map_aops);
-       nilfs_mapping_init_once(&shadow->frozen_btnodes);
+       address_space_init_once(&shadow->frozen_btnodes);
        nilfs_mapping_init(&shadow->frozen_btnodes, bdi, &shadow_map_aops);
        mi->mi_shadow = shadow;
        return 0;
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 0c432416cfefc608383dc8f7ee58d88140a26f59..a585b35fd6bc201c9d3063005792101da769ab01 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -492,19 +492,6 @@ unsigned nilfs_page_count_clean_buffers(struct page *page,
        return nc;
 }
 
-void nilfs_mapping_init_once(struct address_space *mapping)
-{
-       memset(mapping, 0, sizeof(*mapping));
-       INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
-       spin_lock_init(&mapping->tree_lock);
-       INIT_LIST_HEAD(&mapping->private_list);
-       spin_lock_init(&mapping->private_lock);
-
-       spin_lock_init(&mapping->i_mmap_lock);
-       INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
-       INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
-}
-
 void nilfs_mapping_init(struct address_space *mapping,
                        struct backing_dev_info *bdi,
                        const struct address_space_operations *aops)
diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h
index 622df27cd89155d74a94ef786910715f9f3a11ad..2a00953ebd5f1b58b92494dbd684eb6128d76598 100644
--- a/fs/nilfs2/page.h
+++ b/fs/nilfs2/page.h
@@ -61,7 +61,6 @@ void nilfs_free_private_page(struct page *);
 int nilfs_copy_dirty_pages(struct address_space *, struct address_space *);
 void nilfs_copy_back_pages(struct address_space *, struct address_space *);
 void nilfs_clear_dirty_pages(struct address_space *);
-void nilfs_mapping_init_once(struct address_space *mapping);
 void nilfs_mapping_init(struct address_space *mapping,
                        struct backing_dev_info *bdi,
                        const struct address_space_operations *aops);
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 58fd707174e108714091c081604f7daa781d10f5..1673b3d99842018206640c77ea9922840c5a2bc4 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -1279,7 +1279,7 @@ static void nilfs_inode_init_once(void *obj)
 #ifdef CONFIG_NILFS_XATTR
        init_rwsem(&ii->xattr_sem);
 #endif
-       nilfs_btnode_cache_init_once(&ii->i_btnode_cache);
+       address_space_init_once(&ii->i_btnode_cache);
        ii->i_bmap = &ii->i_bmap_data;
        inode_init_once(&ii->vfs_inode);
 }
diff --git a/include/linux/fs.h b/include/linux/fs.h
index bd3215940c3746ec22d8351854649f9b5b485f67..97d08d8a7de8da9080eea5cefc12339b70fb855a 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -649,6 +649,7 @@ struct address_space {
        spinlock_t              private_lock;   /* for use by the address_space */
        struct list_head        private_list;   /* ditto */
        struct address_space    *assoc_mapping; /* ditto */
+       struct mutex            unmap_mutex;    /* to protect unmapping */
 } __attribute__((aligned(sizeof(long))));
        /*
         * On most architectures that alignment is already the case; but
@@ -2225,6 +2226,7 @@ extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin);
 
 extern int inode_init_always(struct super_block *, struct inode *);
 extern void inode_init_once(struct inode *);
+extern void address_space_init_once(struct address_space *mapping);
 extern void ihold(struct inode * inode);
 extern void iput(struct inode *);
 extern struct inode * igrab(struct inode *);
diff --git a/mm/memory.c b/mm/memory.c
index 8e8c183248631cccce48a97943168d1da5e28223..5823698c2b71a9dd79b43986c9929f2cd7485a04 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2648,6 +2648,7 @@ void unmap_mapping_range(struct address_space *mapping,
                details.last_index = ULONG_MAX;
        details.i_mmap_lock = &mapping->i_mmap_lock;
 
+       mutex_lock(&mapping->unmap_mutex);
        spin_lock(&mapping->i_mmap_lock);
 
        /* Protect against endless unmapping loops */
@@ -2664,6 +2665,7 @@ void unmap_mapping_range(struct address_space *mapping,
        if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
                unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
        spin_unlock(&mapping->i_mmap_lock);
+       mutex_unlock(&mapping->unmap_mutex);
 }
 EXPORT_SYMBOL(unmap_mapping_range);