git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/commitdiff
writeback: Protect inode->i_io_list with inode->i_lock
author: Jan Kara <jack@suse.cz>
        Wed, 10 Jun 2020 15:36:03 +0000 (17:36 +0200)
committer: Jan Kara <jack@suse.cz>
        Mon, 15 Jun 2020 07:18:11 +0000 (09:18 +0200)
Currently, operations on inode->i_io_list are protected by
wb->list_lock. In the following patches we'll need to maintain
consistency between inode->i_state and inode->i_io_list so change the
code so that inode->i_lock protects also all inode's i_io_list handling.

Reviewed-by: Martijn Coenen <maco@android.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
CC: stable@vger.kernel.org # Prerequisite for "writeback: Avoid skipping inode writeback"
Signed-off-by: Jan Kara <jack@suse.cz>
fs/fs-writeback.c

index a605c3dddabc76b3c013870b3246b0ddc15af9fa..ff0b18331590bd78661edb235690c444a3a6ce39 100644 (file)
@@ -144,6 +144,7 @@ static void inode_io_list_del_locked(struct inode *inode,
                                     struct bdi_writeback *wb)
 {
        assert_spin_locked(&wb->list_lock);
+       assert_spin_locked(&inode->i_lock);
 
        list_del_init(&inode->i_io_list);
        wb_io_lists_depopulated(wb);
@@ -1122,7 +1123,9 @@ void inode_io_list_del(struct inode *inode)
        struct bdi_writeback *wb;
 
        wb = inode_to_wb_and_lock_list(inode);
+       spin_lock(&inode->i_lock);
        inode_io_list_del_locked(inode, wb);
+       spin_unlock(&inode->i_lock);
        spin_unlock(&wb->list_lock);
 }
 EXPORT_SYMBOL(inode_io_list_del);
@@ -1172,8 +1175,10 @@ void sb_clear_inode_writeback(struct inode *inode)
  * the case then the inode must have been redirtied while it was being written
  * out and we don't reset its dirtied_when.
  */
-static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
+static void redirty_tail_locked(struct inode *inode, struct bdi_writeback *wb)
 {
+       assert_spin_locked(&inode->i_lock);
+
        if (!list_empty(&wb->b_dirty)) {
                struct inode *tail;
 
@@ -1184,6 +1189,13 @@ static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
        inode_io_list_move_locked(inode, wb, &wb->b_dirty);
 }
 
+static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
+{
+       spin_lock(&inode->i_lock);
+       redirty_tail_locked(inode, wb);
+       spin_unlock(&inode->i_lock);
+}
+
 /*
  * requeue inode for re-scanning after bdi->b_io list is exhausted.
  */
@@ -1394,7 +1406,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
                 * writeback is not making progress due to locked
                 * buffers. Skip this inode for now.
                 */
                 * writeback is not making progress due to locked
                 * buffers. Skip this inode for now.
                 */
-               redirty_tail(inode, wb);
+               redirty_tail_locked(inode, wb);
                return;
        }
 
                return;
        }
 
@@ -1414,7 +1426,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
                         * retrying writeback of the dirty page/inode
                         * that cannot be performed immediately.
                         */
-                       redirty_tail(inode, wb);
+                       redirty_tail_locked(inode, wb);
                }
        } else if (inode->i_state & I_DIRTY) {
                /*
@@ -1422,7 +1434,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
                 * such as delayed allocation during submission or metadata
                 * updates after data IO completion.
                 */
-               redirty_tail(inode, wb);
+               redirty_tail_locked(inode, wb);
        } else if (inode->i_state & I_DIRTY_TIME) {
                inode->dirtied_when = jiffies;
                inode_io_list_move_locked(inode, wb, &wb->b_dirty_time);
@@ -1669,8 +1681,8 @@ static long writeback_sb_inodes(struct super_block *sb,
                 */
                spin_lock(&inode->i_lock);
                if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
+                       redirty_tail_locked(inode, wb);
                        spin_unlock(&inode->i_lock);
-                       redirty_tail(inode, wb);
                        continue;
                }
                if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {