diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 227ff12257f360af7fec8f83f043ff9c650286fa..04cf3b91e5016a1f7e3ceef734c7d85574ec296d 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
 #include <linux/tracepoint.h>
 #include "internal.h"
 
-/*
- * The maximum number of pages to writeout in a single bdi flush/kupdate
- * operation.  We do this so we don't hold I_SYNC against an inode for
- * enormous amounts of time, which would block a userspace task which has
- * been forced to throttle against that inode.  Also, the code reevaluates
- * the dirty each time it has written this many pages.
- */
-#define MAX_WRITEBACK_PAGES     1024L
-
 /*
  * Passed into wb_writeback(), essentially a subset of writeback_control
  */
@@ -489,33 +480,8 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
        return ret;
 }
 
-/*
- * For background writeback the caller does not have the sb pinned
- * before calling writeback. So make sure that we do pin it, so it doesn't
- * go away while we are writing inodes from it.
- */
-static bool pin_sb_for_writeback(struct super_block *sb)
-{
-       spin_lock(&sb_lock);
-       if (list_empty(&sb->s_instances)) {
-               spin_unlock(&sb_lock);
-               return false;
-       }
-
-       sb->s_count++;
-       spin_unlock(&sb_lock);
-
-       if (down_read_trylock(&sb->s_umount)) {
-               if (sb->s_root)
-                       return true;
-               up_read(&sb->s_umount);
-       }
-
-       put_super(sb);
-       return false;
-}
-
-static long writeback_chunk_size(struct wb_writeback_work *work)
+static long writeback_chunk_size(struct backing_dev_info *bdi,
+                                struct wb_writeback_work *work)
 {
        long pages;
 
@@ -534,8 +500,13 @@ static long writeback_chunk_size(struct wb_writeback_work *work)
         */
        if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
                pages = LONG_MAX;
-       else
-               pages = min(MAX_WRITEBACK_PAGES, work->nr_pages);
+       else {
+               pages = min(bdi->avg_write_bandwidth / 2,
+                           global_dirty_limit / DIRTY_SCOPE);
+               pages = min(pages, work->nr_pages);
+               pages = round_down(pages + MIN_WRITEBACK_PAGES,
+                                  MIN_WRITEBACK_PAGES);
+       }
 
        return pages;
 }
@@ -596,11 +567,11 @@ static long writeback_sb_inodes(struct super_block *sb,
                spin_lock(&inode->i_lock);
                if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
                        spin_unlock(&inode->i_lock);
-                       requeue_io(inode, wb);
+                       redirty_tail(inode, wb);
                        continue;
                }
                __iget(inode);
-               write_chunk = writeback_chunk_size(work);
+               write_chunk = writeback_chunk_size(wb->bdi, work);
                wbc.nr_to_write = write_chunk;
                wbc.pages_skipped = 0;
 
@@ -646,8 +617,13 @@ static long __writeback_inodes_wb(struct bdi_writeback *wb,
                struct inode *inode = wb_inode(wb->b_io.prev);
                struct super_block *sb = inode->i_sb;
 
-               if (!pin_sb_for_writeback(sb)) {
-                       requeue_io(inode, wb);
+               if (!grab_super_passive(sb)) {
+                       /*
+                        * grab_super_passive() may fail consistently due to
+                        * s_umount being grabbed by someone else. Don't use
+                        * requeue_io() to avoid busy retrying the inode/sb.
+                        */
+                       redirty_tail(inode, wb);
                        continue;
                }
                wrote += writeback_sb_inodes(sb, wb, work);
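
The comment added in the last hunk carries the reasoning behind the requeue_io() -> redirty_tail() switch: grab_super_passive() may fail repeatedly while someone else holds s_umount, and requeue_io() would put the inode straight back onto the b_io list this very loop is draining, so the flusher would busy-retry it. redirty_tail() parks it on the dirty list instead, so it is only revisited on a later pass. The following toy user-space simulation illustrates that difference; the structures and names are made up for illustration and nothing here is kernel code.

/*
 * Toy simulation: requeueing a failed inode onto the queue being drained
 * busy-loops, while redirtying it lets the current pass finish.
 */
#include <stdbool.h>
#include <stdio.h>

#define N 3

struct toy_inode {
	int id;
	bool sb_grabbable;	/* stand-in for grab_super_passive() result */
};

int main(void)
{
	struct toy_inode io[N]    = { {1, true}, {2, false}, {3, true} };
	struct toy_inode dirty[N];
	int io_len = N, dirty_len = 0, iterations = 0;

	/* drain the io queue, as __writeback_inodes_wb() drains wb->b_io */
	while (io_len > 0) {
		struct toy_inode inode = io[--io_len];

		iterations++;
		if (!inode.sb_grabbable) {
			/* redirty_tail policy: defer to the dirty queue.
			 * A requeue_io policy (io[io_len++] = inode) would
			 * keep this loop spinning on inode 2 forever. */
			dirty[dirty_len++] = inode;
			continue;
		}
		printf("wrote inode %d\n", inode.id);
	}
	printf("pass finished after %d iterations, %d inode(s) deferred\n",
	       iterations, dirty_len);
	return 0;
}

Swapping in the commented requeue line makes the toy loop spin on inode 2 indefinitely, which is exactly the busy retry the patch's comment warns about.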