ext4: ext4_inode_info diet
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 02bc8cbe7281b3d47c3449a1c4b8e4220685ba52..a99588673566f1dddef44538d3b8ca499d269b42 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -233,6 +233,11 @@ void ext4_evict_inode(struct inode *inode)
        if (is_bad_inode(inode))
                goto no_delete;
 
+       /*
+        * Protect us against freezing - iput() caller didn't have to have any
+        * protection against it
+        */
+       sb_start_intwrite(inode->i_sb);
        handle = ext4_journal_start(inode, ext4_blocks_for_truncate(inode)+3);
        if (IS_ERR(handle)) {
                ext4_std_error(inode->i_sb, PTR_ERR(handle));
@@ -242,6 +247,7 @@ void ext4_evict_inode(struct inode *inode)
                 * cleaned up.
                 */
                ext4_orphan_del(NULL, inode);
+               sb_end_intwrite(inode->i_sb);
                goto no_delete;
        }
 
@@ -273,6 +279,7 @@ void ext4_evict_inode(struct inode *inode)
                stop_handle:
                        ext4_journal_stop(handle);
                        ext4_orphan_del(NULL, inode);
+                       sb_end_intwrite(inode->i_sb);
                        goto no_delete;
                }
        }
@@ -301,6 +308,7 @@ void ext4_evict_inode(struct inode *inode)
        else
                ext4_free_inode(handle, inode);
        ext4_journal_stop(handle);
+       sb_end_intwrite(inode->i_sb);
        return;
 no_delete:
        ext4_clear_inode(inode);        /* We must guarantee clearing of inode... */
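
Taken together, the four hunks above implement one change: ext4_evict_inode() now takes a freeze-protection reference before it starts the truncate transaction, because the iput() caller holds no such protection of its own, and drops that reference on every path that leaves the transaction. A condensed sketch of the resulting delete path (the real function is shortened here; only the calls relevant to freeze protection are kept):

	sb_start_intwrite(inode->i_sb);		/* taken before the handle */
	handle = ext4_journal_start(inode, ext4_blocks_for_truncate(inode) + 3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		ext4_orphan_del(NULL, inode);
		sb_end_intwrite(inode->i_sb);	/* dropped on the error paths */
		goto no_delete;
	}
	/* ... truncate, orphan handling and inode free under the handle ... */
	ext4_journal_stop(handle);
	sb_end_intwrite(inode->i_sb);		/* dropped on the normal path */
	return;
no_delete:
	ext4_clear_inode(inode);
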
@@ -346,6 +354,15 @@ void ext4_da_update_reserve_space(struct inode *inode,
                used = ei->i_reserved_data_blocks;
        }
 
+       if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) {
+               ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, allocated %d "
+                        "with only %d reserved metadata blocks\n", __func__,
+                        inode->i_ino, ei->i_allocated_meta_blocks,
+                        ei->i_reserved_meta_blocks);
+               WARN_ON(1);
+               ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks;
+       }
+
        /* Update per-inode reservations */
        ei->i_reserved_data_blocks -= used;
        ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
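
The check added above is a defensive clamp: i_allocated_meta_blocks must never exceed i_reserved_meta_blocks, and if it somehow does, the code logs the inode, warns once, and clamps the value so that the subtraction just below cannot wrap the unsigned counter. A minimal user-space analogue of that warn-and-clamp pattern (hypothetical names, only to illustrate the idea):

#include <stdio.h>

/* Hypothetical analogue of the i_allocated_meta_blocks clamp: if the
 * allocated count ever exceeds the reservation, complain loudly and
 * clamp it so "reserved - allocated" cannot underflow. */
static unsigned int clamp_allocated(unsigned int reserved, unsigned int allocated)
{
	if (allocated > reserved) {
		fprintf(stderr, "accounting bug: allocated %u with only %u reserved\n",
			allocated, reserved);
		allocated = reserved;
	}
	return allocated;
}

int main(void)
{
	unsigned int reserved = 4, allocated = 7;

	allocated = clamp_allocated(reserved, allocated);
	printf("remaining reservation: %u\n", reserved - allocated);	/* 0, not a huge wrapped value */
	return 0;
}
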
@@ -544,7 +561,8 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
         * Try to see if we can get the block without requesting a new
         * file system block.
         */
-       down_read((&EXT4_I(inode)->i_data_sem));
+       if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
+               down_read((&EXT4_I(inode)->i_data_sem));
        if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
                retval = ext4_ext_map_blocks(handle, inode, map, flags &
                                             EXT4_GET_BLOCKS_KEEP_SIZE);
@@ -552,7 +570,8 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
                retval = ext4_ind_map_blocks(handle, inode, map, flags &
                                             EXT4_GET_BLOCKS_KEEP_SIZE);
        }
-       up_read((&EXT4_I(inode)->i_data_sem));
+       if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
+               up_read((&EXT4_I(inode)->i_data_sem));
 
        if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
                int ret = check_block_validity(inode, map);
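
Both ext4_map_blocks() hunks implement the new EXT4_GET_BLOCKS_NO_LOCK flag: a caller that already holds i_data_sem (the overwrite direct-I/O path added further down in this diff) can ask the lookup not to take the semaphore again. The caller-side contract, sketched from that later path:

	/* Sketch of how the flag is meant to be used: take i_data_sem once,
	 * then do lock-free lookups under it. */
	down_read(&EXT4_I(inode)->i_data_sem);
	map.m_lblk = iblock;
	map.m_len = len;				/* number of blocks to map */
	ret = ext4_map_blocks(handle, inode, &map, EXT4_GET_BLOCKS_NO_LOCK);
	/* ... submit the I/O against map.m_pblk ... */
	up_read(&EXT4_I(inode)->i_data_sem);
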
@@ -713,11 +732,13 @@ struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
        err = ext4_map_blocks(handle, inode, &map,
                              create ? EXT4_GET_BLOCKS_CREATE : 0);
 
+       /* ensure we send some value back into *errp */
+       *errp = 0;
+
        if (err < 0)
                *errp = err;
        if (err <= 0)
                return NULL;
-       *errp = 0;
 
        bh = sb_getblk(inode->i_sb, map.m_pblk);
        if (!bh) {
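
Moving *errp = 0 above the early returns fixes an uninitialized-out-parameter case: when ext4_map_blocks() returns 0 (nothing mapped, no error), the old code bailed out with NULL before ever writing *errp, so callers saw whatever happened to be on the stack. A tiny stand-alone illustration of the ordering (hypothetical names):

#include <stdio.h>

/* Hypothetical stand-in for ext4_getblk()'s out-parameter handling:
 * write *errp before any early return, so the "no block, no error"
 * case (err == 0) reports 0 instead of stale stack contents. */
static void *get_block(int err, int *errp)
{
	*errp = 0;			/* ensure we send some value back */
	if (err < 0)
		*errp = err;
	if (err <= 0)
		return NULL;		/* err == 0: a miss, *errp stays 0 */
	return (void *)"block";		/* err > 0: something was mapped */
}

int main(void)
{
	int err = 12345;		/* pretend this is uninitialized junk */

	if (!get_block(0, &err))
		printf("no block mapped, err=%d\n", err);	/* prints err=0 */
	return 0;
}
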
@@ -1171,6 +1192,17 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
        struct ext4_inode_info *ei = EXT4_I(inode);
        unsigned int md_needed;
        int ret;
+       ext4_lblk_t save_last_lblock;
+       int save_len;
+
+       /*
+        * We will charge metadata quota at writeout time; this saves
+        * us from metadata over-estimation, though we may go over by
+        * a small amount in the end.  Here we just reserve for data.
+        */
+       ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
+       if (ret)
+               return ret;
 
        /*
         * recalculate the amount of metadata blocks to reserve
@@ -1179,32 +1211,31 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
         */
 repeat:
        spin_lock(&ei->i_block_reservation_lock);
+       /*
+        * ext4_calc_metadata_amount() has side effects, which we have
+        * to be prepared to undo if we fail to claim space.
+        */
+       save_len = ei->i_da_metadata_calc_len;
+       save_last_lblock = ei->i_da_metadata_calc_last_lblock;
        md_needed = EXT4_NUM_B2C(sbi,
                                 ext4_calc_metadata_amount(inode, lblock));
        trace_ext4_da_reserve_space(inode, md_needed);
-       spin_unlock(&ei->i_block_reservation_lock);
 
-       /*
-        * We will charge metadata quota at writeout time; this saves
-        * us from metadata over-estimation, though we may go over by
-        * a small amount in the end.  Here we just reserve for data.
-        */
-       ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
-       if (ret)
-               return ret;
        /*
         * We do still charge estimated metadata to the sb though;
         * we cannot afford to run out of free blocks.
         */
        if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) {
-               dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
+               ei->i_da_metadata_calc_len = save_len;
+               ei->i_da_metadata_calc_last_lblock = save_last_lblock;
+               spin_unlock(&ei->i_block_reservation_lock);
                if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
                        yield();
                        goto repeat;
                }
+               dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
                return -ENOSPC;
        }
-       spin_lock(&ei->i_block_reservation_lock);
        ei->i_reserved_data_blocks++;
        ei->i_reserved_meta_blocks += md_needed;
        spin_unlock(&ei->i_block_reservation_lock);
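
Read as a whole, the ext4_da_reserve_space() hunks reorder the function so that every side effect can be undone on failure: the data-quota reservation moves in front of the lock, the metadata-calculation state is snapshotted while i_block_reservation_lock is held, and a failed cluster claim restores that snapshot before retrying; the quota reservation is only released once the retries are exhausted. Condensed, the new flow is:

	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));	/* data quota up front */
	if (ret)
		return ret;
repeat:
	spin_lock(&ei->i_block_reservation_lock);
	save_len = ei->i_da_metadata_calc_len;			/* snapshot the side effects */
	save_last_lblock = ei->i_da_metadata_calc_last_lblock;
	md_needed = EXT4_NUM_B2C(sbi, ext4_calc_metadata_amount(inode, lblock));
	if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) {
		ei->i_da_metadata_calc_len = save_len;		/* undo before dropping the lock */
		ei->i_da_metadata_calc_last_lblock = save_last_lblock;
		spin_unlock(&ei->i_block_reservation_lock);
		if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
			yield();
			goto repeat;
		}
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
		return -ENOSPC;
	}
	ei->i_reserved_data_blocks++;				/* success: commit the counters */
	ei->i_reserved_meta_blocks += md_needed;
	spin_unlock(&ei->i_block_reservation_lock);
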
@@ -1925,9 +1956,6 @@ out:
        return ret;
 }
 
-static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
-static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
-
 /*
  * Note that we don't need to start a transaction unless we're journaling data
  * because we should have holes filled from ext4_page_mkwrite(). We even don't
@@ -2434,6 +2462,16 @@ static int ext4_nonda_switch(struct super_block *sb)
        free_blocks  = EXT4_C2B(sbi,
                percpu_counter_read_positive(&sbi->s_freeclusters_counter));
        dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
+       /*
+        * Start pushing delalloc when 1/2 of free blocks are dirty.
+        */
+       if (dirty_blocks && (free_blocks < 2 * dirty_blocks) &&
+           !writeback_in_progress(sb->s_bdi) &&
+           down_read_trylock(&sb->s_umount)) {
+               writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
+               up_read(&sb->s_umount);
+       }
+
        if (2 * free_blocks < 3 * dirty_blocks ||
                free_blocks < (dirty_blocks + EXT4_FREECLUSTERS_WATERMARK)) {
                /*
@@ -2442,13 +2480,6 @@ static int ext4_nonda_switch(struct super_block *sb)
                 */
                return 1;
        }
-       /*
-        * Even if we don't switch but are nearing capacity,
-        * start pushing delalloc when 1/2 of free blocks are dirty.
-        */
-       if (free_blocks < 2 * dirty_blocks)
-               writeback_inodes_sb_if_idle(sb, WB_REASON_FS_FREE_SPACE);
-
        return 0;
 }
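
The writeback kick now runs before the switch-off decision instead of after it, and it is guarded by writeback_in_progress() plus a trylock of s_umount rather than the old writeback_inodes_sb_if_idle() helper. The two thresholds are: start pushing delalloc once the dirty reservation reaches half of the free blocks (free < 2 * dirty), and fall back to non-delalloc writes once 2 * free < 3 * dirty (ignoring the free-clusters watermark term). A worked example with made-up counts:

#include <stdio.h>

/* Worked example of the two ext4_nonda_switch() thresholds, using
 * invented block counts just to show which branch fires when. */
int main(void)
{
	unsigned long long free_blocks = 3000, dirty_blocks = 1600;

	if (dirty_blocks && free_blocks < 2 * dirty_blocks)	/* 3000 < 3200: kick writeback */
		printf("push delalloc writeback\n");

	if (2 * free_blocks < 3 * dirty_blocks)			/* 6000 < 4800 is false */
		printf("switch to nodelalloc\n");
	else
		printf("keep delayed allocation\n");

	return 0;
}
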
 
@@ -2818,6 +2849,32 @@ static int ext4_get_block_write(struct inode *inode, sector_t iblock,
                               EXT4_GET_BLOCKS_IO_CREATE_EXT);
 }
 
+static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
+                  struct buffer_head *bh_result, int flags)
+{
+       handle_t *handle = ext4_journal_current_handle();
+       struct ext4_map_blocks map;
+       int ret = 0;
+
+       ext4_debug("ext4_get_block_write_nolock: inode %lu, flag %d\n",
+                  inode->i_ino, flags);
+
+       flags = EXT4_GET_BLOCKS_NO_LOCK;
+
+       map.m_lblk = iblock;
+       map.m_len = bh_result->b_size >> inode->i_blkbits;
+
+       ret = ext4_map_blocks(handle, inode, &map, flags);
+       if (ret > 0) {
+               map_bh(bh_result, inode->i_sb, map.m_pblk);
+               bh_result->b_state = (bh_result->b_state & ~EXT4_MAP_FLAGS) |
+                                       map.m_flags;
+               bh_result->b_size = inode->i_sb->s_blocksize * map.m_len;
+               ret = 0;
+       }
+       return ret;
+}
+
 static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
                            ssize_t size, void *private, int ret,
                            bool is_async)
@@ -2966,6 +3023,18 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
 
        loff_t final_size = offset + count;
        if (rw == WRITE && final_size <= inode->i_size) {
+               int overwrite = 0;
+
+               BUG_ON(iocb->private == NULL);
+
+               /* If we do an overwrite dio, i_mutex locking can be released */
+               overwrite = *((int *)iocb->private);
+
+               if (overwrite) {
+                       down_read(&EXT4_I(inode)->i_data_sem);
+                       mutex_unlock(&inode->i_mutex);
+               }
+
                /*
                 * We could direct write to holes and fallocate.
                 *
@@ -2987,12 +3056,14 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
                 * hook to the iocb.
                 */
                iocb->private = NULL;
-               EXT4_I(inode)->cur_aio_dio = NULL;
+               ext4_inode_aio_set(inode, NULL);
                if (!is_sync_kiocb(iocb)) {
                        ext4_io_end_t *io_end =
                                ext4_init_io_end(inode, GFP_NOFS);
-                       if (!io_end)
-                               return -ENOMEM;
+                       if (!io_end) {
+                               ret = -ENOMEM;
+                               goto retake_lock;
+                       }
                        io_end->flag |= EXT4_IO_END_DIRECT;
                        iocb->private = io_end;
                        /*
@@ -3002,18 +3073,27 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
                         * is a unwritten extents needs to be converted
                         * when IO is completed.
                         */
-                       EXT4_I(inode)->cur_aio_dio = iocb->private;
+                       ext4_inode_aio_set(inode, io_end);
                }
 
-               ret = __blockdev_direct_IO(rw, iocb, inode,
-                                        inode->i_sb->s_bdev, iov,
-                                        offset, nr_segs,
-                                        ext4_get_block_write,
-                                        ext4_end_io_dio,
-                                        NULL,
-                                        DIO_LOCKING);
+               if (overwrite)
+                       ret = __blockdev_direct_IO(rw, iocb, inode,
+                                                inode->i_sb->s_bdev, iov,
+                                                offset, nr_segs,
+                                                ext4_get_block_write_nolock,
+                                                ext4_end_io_dio,
+                                                NULL,
+                                                0);
+               else
+                       ret = __blockdev_direct_IO(rw, iocb, inode,
+                                                inode->i_sb->s_bdev, iov,
+                                                offset, nr_segs,
+                                                ext4_get_block_write,
+                                                ext4_end_io_dio,
+                                                NULL,
+                                                DIO_LOCKING);
                if (iocb->private)
-                       EXT4_I(inode)->cur_aio_dio = NULL;
+                       ext4_inode_aio_set(inode, NULL);
                /*
                 * The io_end structure takes a reference to the inode,
                 * that structure needs to be destroyed and the
@@ -3031,7 +3111,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
                if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
                        ext4_free_io_end(iocb->private);
                        iocb->private = NULL;
-               } else if (ret > 0 && ext4_test_inode_state(inode,
+               } else if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
                                                EXT4_STATE_DIO_UNWRITTEN)) {
                        int err;
                        /*
@@ -3044,6 +3124,14 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
                                ret = err;
                        ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
                }
+
+       retake_lock:
+               /* take i_mutex locking again if we do an overwrite dio */
+               if (overwrite) {
+                       up_read(&EXT4_I(inode)->i_data_sem);
+                       mutex_lock(&inode->i_mutex);
+               }
+
                return ret;
        }
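
For the duration of an overwrite direct write, ext4_ext_direct_IO() swaps locks: it takes i_data_sem for read so the blocks being overwritten cannot be moved, and releases i_mutex so a plain overwrite does not serialize against other writers; retake_lock undoes the swap on every exit, including the new -ENOMEM path. Stripped of the io_end bookkeeping, the shape of the change is:

	if (overwrite) {
		down_read(&EXT4_I(inode)->i_data_sem);	/* pin the existing block mapping */
		mutex_unlock(&inode->i_mutex);		/* overwrites need not hold i_mutex */
	}

	ret = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
				   iov, offset, nr_segs,
				   overwrite ? ext4_get_block_write_nolock
					     : ext4_get_block_write,
				   ext4_end_io_dio, NULL,
				   overwrite ? 0 : DIO_LOCKING);
	/* ... unwritten-extent conversion for the non-overwrite case ... */

retake_lock:
	if (overwrite) {
		up_read(&EXT4_I(inode)->i_data_sem);
		mutex_lock(&inode->i_mutex);		/* restore the caller's locking state */
	}
	return ret;
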
 
@@ -3966,6 +4054,7 @@ static int ext4_do_update_inode(handle_t *handle,
        struct ext4_inode_info *ei = EXT4_I(inode);
        struct buffer_head *bh = iloc->bh;
        int err = 0, rc, block;
+       int need_datasync = 0;
        uid_t i_uid;
        gid_t i_gid;
 
@@ -4016,7 +4105,10 @@ static int ext4_do_update_inode(handle_t *handle,
                raw_inode->i_file_acl_high =
                        cpu_to_le16(ei->i_file_acl >> 32);
        raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
-       ext4_isize_set(raw_inode, ei->i_disksize);
+       if (ei->i_disksize != ext4_isize(raw_inode)) {
+               ext4_isize_set(raw_inode, ei->i_disksize);
+               need_datasync = 1;
+       }
        if (ei->i_disksize > 0x7fffffffULL) {
                struct super_block *sb = inode->i_sb;
                if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
@@ -4034,7 +4126,7 @@ static int ext4_do_update_inode(handle_t *handle,
                        EXT4_SET_RO_COMPAT_FEATURE(sb,
                                        EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
                        ext4_handle_sync(handle);
-                       err = ext4_handle_dirty_super_now(handle, sb);
+                       err = ext4_handle_dirty_super(handle, sb);
                }
        }
        raw_inode->i_generation = cpu_to_le32(inode->i_generation);
@@ -4069,7 +4161,7 @@ static int ext4_do_update_inode(handle_t *handle,
                err = rc;
        ext4_clear_inode_state(inode, EXT4_STATE_NEW);
 
-       ext4_update_inode_fsync_trans(handle, inode, 0);
+       ext4_update_inode_fsync_trans(handle, inode, need_datasync);
 out_brelse:
        brelse(bh);
        ext4_std_error(inode->i_sb, err);
@@ -4701,11 +4793,7 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        get_block_t *get_block;
        int retries = 0;
 
-       /*
-        * This check is racy but catches the common case. We rely on
-        * __block_page_mkwrite() to do a reliable check.
-        */
-       vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
+       sb_start_pagefault(inode->i_sb);
        /* Delalloc case is easy... */
        if (test_opt(inode->i_sb, DELALLOC) &&
            !ext4_should_journal_data(inode) &&
@@ -4773,5 +4861,6 @@ retry_alloc:
 out_ret:
        ret = block_page_mkwrite_return(ret);
 out:
+       sb_end_pagefault(inode->i_sb);
        return ret;
 }
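
Finally, the last two hunks replace the racy vfs_check_frozen() probe with the paired freeze-protection API: the whole of ext4_page_mkwrite() now runs between sb_start_pagefault() and sb_end_pagefault(), so a fault that dirties a page can no longer slip in while the filesystem is being frozen. The resulting bracket, condensed:

	sb_start_pagefault(inode->i_sb);	/* blocks here if a freeze is in progress */
	/* ... delalloc fast path, or the journalled block_page_mkwrite() retry loop ... */
out_ret:
	ret = block_page_mkwrite_return(ret);
out:
	sb_end_pagefault(inode->i_sb);
	return ret;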